diff --git a/ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3d3abaf30c4480200a41529406e9a6671d0a9d39
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/15.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f517a90b182dfc13198c7d4a24cd63a138b5968d72944605c412ec085c518404
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/15.attention.dense.weight/fp32.pt b/ckpts/universal/global_step20/zero/15.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..85de3878c74b3e30b18e052212d77c8314a91f30
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/15.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a29804e6c56792e55194ac2972b5a00e29fb4d871134bec165ec9b52f799f10d
+size 16778317
diff --git a/ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e09233cfb90a67c4a1cb4b555458800c8520220e
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ea36382d054b1f217f5323818c4507bacf660a2291c6cd0ecf71fc601e14084
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..348efd8b3a684ae0fc877da35a139f13b80e3ecc
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/15.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:624d3ec33cde3729327c8cdd4fad871e8dd3fc3369fff103d89f9b31d88132e9
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a5c5a5d670ff0651a9017645f1e4e25465fbffd1
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/23.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03a1643a820a072efeccf06e8a7b4deba3f7da62e083f5db59304f9370940ebb
+size 50332749
diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-res.json b/lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..8fc5d47310794c3ec4228c51ccb05e58c90aad5c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-res.json
@@ -0,0 +1 @@
+{"results": {"arithmetic_2dm": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_2dm": 0}}
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..14cf8bb5879320c3838808bea5715ac06b046fd9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py
@@ -0,0 +1,71 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
+
+
+_import_structure = {"configuration_bert_generation": ["BertGenerationConfig"]}
+
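+# Optional components are only registered when their dependency (sentencepiece, torch) is importable;
+# at runtime the whole module is replaced by a _LazyModule (see the bottom of this file), so nothing
+# heavy is imported until an attribute is first accessed, while the TYPE_CHECKING branch gives static
+# type checkers the real symbols.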
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_bert_generation"] = ["BertGenerationTokenizer"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_bert_generation"] = [
+ "BertGenerationDecoder",
+ "BertGenerationEncoder",
+ "BertGenerationPreTrainedModel",
+ "load_tf_weights_in_bert_generation",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_bert_generation import BertGenerationConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_bert_generation import BertGenerationTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_bert_generation import (
+ BertGenerationDecoder,
+ BertGenerationEncoder,
+ BertGenerationPreTrainedModel,
+ load_tf_weights_in_bert_generation,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1919cb08679e1ee365b0b63b51d0e97549638559
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c8a517c379ed646ff9b44360e5ca0c4e2f1dca2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b6050040ec36782c4d3edd49fb876d6a76f12e4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c08e9594ecdc895c20b67a38d7cbc2b9f1aea20
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..841aec5c0fb7acc3fb651aa213bf4cf2e1a6a581
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py
@@ -0,0 +1,124 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BertGeneration model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+
+
+class BertGenerationConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to
+ instantiate a BertGeneration model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration
+ [google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50358):
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`BertGenerationEncoder`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 2):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 1):
+ End of stream token id.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import BertGenerationConfig, BertGenerationEncoder
+
+ >>> # Initializing a BertGeneration config
+ >>> configuration = BertGenerationConfig()
+
+ >>> # Initializing a model (with random weights) from the config
+ >>> model = BertGenerationEncoder(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "bert-generation"
+
+ def __init__(
+ self,
+ vocab_size=50358,
+ hidden_size=1024,
+ num_hidden_layers=24,
+ num_attention_heads=16,
+ intermediate_size=4096,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ bos_token_id=2,
+ eos_token_id=1,
+ position_embedding_type="absolute",
+ use_cache=True,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7250f6f7b926fc21102007ce34568d9276615f9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py
@@ -0,0 +1,1008 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch BERT model specific for generation."""
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_bert_generation import BertGenerationConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google/bert_for_seq_generation_L-24_bbc_encoder"
+_CONFIG_FOR_DOC = "BertGenerationConfig"
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration
+class BertGenerationSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration
+class BertGenerationSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
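+        # (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)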
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
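+        # For "relative_key"/"relative_key_query", add learned position biases indexed by the signed
+        # distance between each query position and each key position (shifted to be non-negative).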
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
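+        # Scale by 1/sqrt(head_size), as in standard scaled dot-product attention.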
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the BertGenerationEncoder forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration
+class BertGenerationAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = BertGenerationSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = BertGenerationSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration
+class BertGenerationIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration
+class BertGenerationOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration
+class BertGenerationLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = BertGenerationAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = BertGenerationAttention(config, position_embedding_type="absolute")
+ self.intermediate = BertGenerationIntermediate(config)
+ self.output = BertGenerationOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->BertGeneration
+class BertEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+def load_tf_weights_in_bert_generation(
+ model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False
+):
+ try:
+ import numpy as np
+ import tensorflow.compat.v1 as tf
+ import tensorflow_hub as hub
+ import tensorflow_text # noqa: F401
+
+ tf.disable_eager_execution()
+ except ImportError:
+ logger.error(
+            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_model = hub.Module(tf_hub_path)
+ init = tf.global_variables_initializer()
+ with tf.Session() as sess:
+ init.run()
+ all_variables = tf_model.variable_map
+ keep_track_variables = all_variables.copy()
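+        # any variable still left in this dict after the loop was never copied into the PyTorch
+        # model and is reported at the end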
+ for key in list(all_variables.keys()):
+ if "global" in key:
+ logger.info(f"Skipping {key}...")
+ continue
+ if not is_encoder:
+ model_pointer = getattr(model, model_class)
+ else:
+ model_pointer = model
+ is_embedding = False
+ logger.info(f"Trying to match {key}...")
+ # remove start_string = "module/bert/"
+ sub_layers = key.split("/")[2:]
+ if is_encoder_named_decoder and sub_layers[0] == "encoder":
+ logger.info(f"Skipping encoder layer {key} for decoder")
+ continue
+ if is_encoder and sub_layers[0] == "decoder":
+ logger.info(f"Skipping decoder layer {key} for encoder")
+ continue
+ for i, sub_layer in enumerate(sub_layers):
+ if sub_layer == "embeddings":
+ is_embedding = True
+ elif sub_layer == "LayerNorm":
+ is_embedding = False
+ if "layer" in sub_layer:
+ model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])]
+ elif sub_layer in ["kernel", "gamma"]:
+ model_pointer = model_pointer.weight
+ elif sub_layer == "beta":
+ model_pointer = model_pointer.bias
+ elif sub_layer == "encdec":
+ model_pointer = model_pointer.crossattention.self
+ elif sub_layer == "encdec_output":
+ model_pointer = model_pointer.crossattention.output
+ elif is_encoder_named_decoder and sub_layer == "decoder":
+ model_pointer = model_pointer.encoder
+ else:
+ if sub_layer == "attention" and "encdec" in sub_layers[i + 1]:
+ continue
+ try:
+ model_pointer = getattr(model_pointer, sub_layer)
+ except AttributeError:
+ logger.info(f"Skipping to initialize {key} at {sub_layer}...")
+ raise AttributeError
+
+ array = np.asarray(sess.run(all_variables[key]))
+ if not is_embedding:
+ logger.info(f"Transposing numpy weight of shape {array.shape} for {key}")
+ array = np.transpose(array)
+ else:
+ model_pointer = model_pointer.weight
+
+ if model_pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched")
+ logger.info(f"Initialize PyTorch weight {key}")
+
+ model_pointer.data = torch.from_numpy(array.astype(np.float32))
+ keep_track_variables.pop(key, None)
+
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}")
+ return model
+
+
+class BertGenerationEmbeddings(nn.Module):
+ """Construct the embeddings from word and position embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
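+        # note: unlike BertEmbeddings, this model uses no token_type_embeddings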
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ position_embeddings = self.position_embeddings(position_ids)
+
+ embeddings = inputs_embeds + position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class BertGenerationPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BertGenerationConfig
+ base_model_prefix = "bert"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+BERT_GENERATION_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`BertGenerationConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BERT_GENERATION_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.",
+ BERT_GENERATION_START_DOCSTRING,
+)
+class BertGenerationEncoder(BertGenerationPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ This model should be used when leveraging Bert or Roberta checkpoints for the [`EncoderDecoderModel`] class as
+ described in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461)
+ by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.
+
+    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
+    and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward
+    pass.
+ """
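+    # Illustrative sketch only (not part of the upstream file): pairing this encoder with
+    # BertGenerationDecoder inside an EncoderDecoderModel, as described in the docstring above,
+    # typically looks roughly like:
+    #
+    #   from transformers import BertGenerationDecoder, BertGenerationEncoder, EncoderDecoderModel
+    #
+    #   encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
+    #   decoder = BertGenerationDecoder.from_pretrained(
+    #       "google/bert_for_seq_generation_L-24_bbc_encoder", is_decoder=True, add_cross_attention=True
+    #   )
+    #   model = EncoderDecoderModel(encoder=encoder, decoder=decoder)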
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = BertGenerationEmbeddings(config)
+ self.encoder = BertEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: `1` for
+ tokens that are NOT MASKED, `0` for MASKED tokens.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask = None
+ if not use_cache:
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+class BertGenerationOnlyLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ self.decoder.bias = self.bias
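+        # the bias is registered as a Parameter on the head and shared with the Linear layer so both
+        # names point at the same tensor; _tie_weights below re-links them if they ever get out of sync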
+
+ def forward(self, hidden_states):
+ logits = self.decoder(hidden_states)
+ return logits
+
+ def _tie_weights(self):
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
+ self.bias = self.decoder.bias
+
+
+@add_start_docstrings(
+ """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.""",
+ BERT_GENERATION_START_DOCSTRING,
+)
+class BertGenerationDecoder(BertGenerationPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if not config.is_decoder:
+            logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True`.")
+
+ self.bert = BertGenerationEncoder(config)
+ self.lm_head = BertGenerationOnlyLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BertGenerationDecoder, BertGenerationConfig
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
+ >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
+ >>> config.is_decoder = True
+ >>> model = BertGenerationDecoder.from_pretrained(
+ ... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config
+ ... )
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> prediction_logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ lm_loss = None
+ if labels is not None:
+ # we are doing next-token prediction; shift prediction scores and input ids by one
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+ labels = labels[:, 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[1:]
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=lm_loss,
+ logits=prediction_scores,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
+ input_shape = input_ids.shape
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_shape)
+
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
+
+ def _reorder_cache(self, past_key_values, beam_idx):
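+        # during beam search, reorder each layer's cached key/value states along the batch dimension
+        # so the cache follows the beams selected at this step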
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..772eb123c398884472862d41ab3f1cb580f99bb9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py
@@ -0,0 +1,173 @@
+# coding=utf-8
+# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization class for model BertGeneration."""
+
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
+
+
+class BertGenerationTokenizer(PreTrainedTokenizer):
+ """
+ Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning-of-sequence token.
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end-of-sequence token.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+ sep_token (`str`, *optional*, defaults to `"<::::>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
+                using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
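+
+    Example (illustrative sketch; assumes the public `google/bert_for_seq_generation_L-24_bbc_encoder` checkpoint is
+    reachable):
+
+    ```python
+    >>> from transformers import BertGenerationTokenizer
+
+    >>> tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
+    >>> ids = tokenizer("Hello world").input_ids
+    >>> text = tokenizer.decode(ids)  # round-trips back to (approximately) the input text
+    ```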
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ prefix_tokens: List[int] = []
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+        bos_token="<s>",
+        eos_token="</s>",
+        unk_token="<unk>",
+        pad_token="<pad>",
+ sep_token="<::::>",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.vocab_file = vocab_file
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ # Add extra_ids to the special token list
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ sep_token=sep_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return self.sp_model.get_piece_size()
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ for token in tokens:
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string.strip()
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/deberta/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..87806dd60d60c5247554c9458de8fd8ca3f45f0f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta/__init__.py
@@ -0,0 +1,120 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
+ "tokenization_deberta": ["DebertaTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_deberta"] = [
+ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "DebertaForMaskedLM",
+ "DebertaForQuestionAnswering",
+ "DebertaForSequenceClassification",
+ "DebertaForTokenClassification",
+ "DebertaModel",
+ "DebertaPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_deberta"] = [
+ "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFDebertaForMaskedLM",
+ "TFDebertaForQuestionAnswering",
+ "TFDebertaForSequenceClassification",
+ "TFDebertaForTokenClassification",
+ "TFDebertaModel",
+ "TFDebertaPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
+ from .tokenization_deberta import DebertaTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_deberta_fast import DebertaTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_deberta import (
+ DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
+ DebertaForMaskedLM,
+ DebertaForQuestionAnswering,
+ DebertaForSequenceClassification,
+ DebertaForTokenClassification,
+ DebertaModel,
+ DebertaPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_deberta import (
+ TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFDebertaForMaskedLM,
+ TFDebertaForQuestionAnswering,
+ TFDebertaForSequenceClassification,
+ TFDebertaForTokenClassification,
+ TFDebertaModel,
+ TFDebertaPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec042bc6ebe27fb88a3c2640e5b53a5f89d4107e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0e7a716e634cbedba5f1cff6253d5f224b286be
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/configuration_deberta.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f0aea7c3621e7a880d2d52bf6dbfc6a63e293b6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_deberta.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a107191b86ff60535c7b4e3e66d768cd34dba3e0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/modeling_tf_deberta.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f975988f16b0ac84a96e1545f99f8782387f02f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0201ab2fa9eafb889d20778bbf8a27bf2927be7e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta/__pycache__/tokenization_deberta_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/configuration_deberta.py b/venv/lib/python3.10/site-packages/transformers/models/deberta/configuration_deberta.py
new file mode 100644
index 0000000000000000000000000000000000000000..5907f0869d6821c49d6cbc417593b4b0a81e4768
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta/configuration_deberta.py
@@ -0,0 +1,193 @@
+# coding=utf-8
+# Copyright 2020, Microsoft and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" DeBERTa model configuration"""
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+if TYPE_CHECKING:
+ from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class DebertaConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`DebertaModel`] or a [`TFDebertaModel`]. It is
+ used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa
+ [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Arguments:
+        vocab_size (`int`, *optional*, defaults to 50265):
+            Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
+ are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+        type_vocab_size (`int`, *optional*, defaults to 0):
+            The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-7):
+            The epsilon used by the layer normalization layers.
+        relative_attention (`bool`, *optional*, defaults to `False`):
+            Whether to use relative position encoding.
+        max_relative_positions (`int`, *optional*, defaults to -1):
+            The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
+            as `max_position_embeddings`.
+        pad_token_id (`int`, *optional*, defaults to 0):
+            The value used to pad input_ids.
+        position_biased_input (`bool`, *optional*, defaults to `True`):
+            Whether to add absolute position embeddings to the content embeddings.
+        pos_att_type (`List[str]`, *optional*):
+            The type of relative position attention, it can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]`,
+            `["p2c", "c2p"]`.
+
+ Example:
+
+ ```python
+ >>> from transformers import DebertaConfig, DebertaModel
+
+ >>> # Initializing a DeBERTa microsoft/deberta-base style configuration
+ >>> configuration = DebertaConfig()
+
+ >>> # Initializing a model (with random weights) from the microsoft/deberta-base style configuration
+ >>> model = DebertaModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "deberta"
+
+ def __init__(
+ self,
+ vocab_size=50265,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-7,
+ relative_attention=False,
+ max_relative_positions=-1,
+ pad_token_id=0,
+ position_biased_input=True,
+ pos_att_type=None,
+ pooler_dropout=0,
+ pooler_hidden_act="gelu",
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.relative_attention = relative_attention
+ self.max_relative_positions = max_relative_positions
+ self.pad_token_id = pad_token_id
+ self.position_biased_input = position_biased_input
+
+ # Backwards compatibility
+ if isinstance(pos_att_type, str):
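+            # e.g. the legacy string form "p2c|c2p" is normalized to ["p2c", "c2p"]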
+ pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
+
+ self.pos_att_type = pos_att_type
+ self.vocab_size = vocab_size
+ self.layer_norm_eps = layer_norm_eps
+
+ self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
+ self.pooler_dropout = pooler_dropout
+ self.pooler_hidden_act = pooler_hidden_act
+
+
+# Copied from transformers.models.deberta_v2.configuration_deberta_v2.DebertaV2OnnxConfig
+class DebertaOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ if self._config.type_vocab_size > 0:
+ return OrderedDict(
+ [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
+ )
+ else:
+ return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 12
+
+ def generate_dummy_inputs(
+ self,
+ preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
+ batch_size: int = -1,
+ seq_length: int = -1,
+ num_choices: int = -1,
+ is_pair: bool = False,
+ framework: Optional["TensorType"] = None,
+ num_channels: int = 3,
+ image_width: int = 40,
+ image_height: int = 40,
+ tokenizer: "PreTrainedTokenizerBase" = None,
+ ) -> Mapping[str, Any]:
+ dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
+ if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
+ del dummy_inputs["token_type_ids"]
+ return dummy_inputs
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_deberta.py b/venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_deberta.py
new file mode 100644
index 0000000000000000000000000000000000000000..42dae5c80894a8ee8265fbb096fa579905aa1bb2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_deberta.py
@@ -0,0 +1,1426 @@
+# coding=utf-8
+# Copyright 2020 Microsoft and the Hugging Face Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch DeBERTa model."""
+
+from collections.abc import Sequence
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ MaskedLMOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import softmax_backward_data
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_deberta import DebertaConfig
+
+
+logger = logging.get_logger(__name__)
+_CONFIG_FOR_DOC = "DebertaConfig"
+_CHECKPOINT_FOR_DOC = "microsoft/deberta-base"
+
+# Masked LM docstring
+_CHECKPOINT_FOR_MASKED_LM = "lsanochkin/deberta-large-feedback"
+_MASKED_LM_EXPECTED_OUTPUT = "' Paris'"
+_MASKED_LM_EXPECTED_LOSS = "0.54"
+
+# QuestionAnswering docstring
+_CHECKPOINT_FOR_QA = "Palak/microsoft_deberta-large_squad"
+_QA_EXPECTED_OUTPUT = "' a nice puppet'"
+_QA_EXPECTED_LOSS = 0.14
+_QA_TARGET_START_INDEX = 12
+_QA_TARGET_END_INDEX = 14
+
+
+from ..deprecated._archive_maps import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class ContextPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
+ self.dropout = StableDropout(config.pooler_dropout)
+ self.config = config
+
+ def forward(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+
+ context_token = hidden_states[:, 0]
+ context_token = self.dropout(context_token)
+ pooled_output = self.dense(context_token)
+ pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
+ return pooled_output
+
+ @property
+ def output_dim(self):
+ return self.config.hidden_size
+
+
+class XSoftmax(torch.autograd.Function):
+ """
+ Masked Softmax which is optimized for saving memory
+
+ Args:
+        input (`torch.tensor`): The input tensor on which softmax will be applied.
+        mask (`torch.IntTensor`):
+            The mask matrix where 0 indicates that the element will be ignored in the softmax calculation.
+        dim (int): The dimension along which softmax is applied.
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from transformers.models.deberta.modeling_deberta import XSoftmax
+
+ >>> # Make a tensor
+ >>> x = torch.randn([4, 20, 100])
+
+ >>> # Create a mask
+ >>> mask = (x > 0).int()
+
+ >>> # Specify the dimension to apply softmax
+ >>> dim = -1
+
+ >>> y = XSoftmax.apply(x, mask, dim)
+ ```"""
+
+ @staticmethod
+ def forward(self, input, mask, dim):
+ self.dim = dim
+ rmask = ~(mask.to(torch.bool))
+
+ output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
+ output = torch.softmax(output, self.dim)
+ output.masked_fill_(rmask, 0)
+ self.save_for_backward(output)
+ return output
+
+ @staticmethod
+ def backward(self, grad_output):
+ (output,) = self.saved_tensors
+ inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
+ return inputGrad, None, None
+
+ @staticmethod
+ def symbolic(g, self, mask, dim):
+ import torch.onnx.symbolic_helper as sym_help
+ from torch.onnx.symbolic_opset9 import masked_fill, softmax
+
+ mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
+ r_mask = g.op(
+ "Cast",
+ g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
+ to_i=sym_help.cast_pytorch_to_onnx["Bool"],
+ )
+ output = masked_fill(
+ g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min))
+ )
+ output = softmax(g, output, dim)
+ return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))
+
+
+class DropoutContext(object):
+ def __init__(self):
+ self.dropout = 0
+ self.mask = None
+ self.scale = 1
+ self.reuse_mask = True
+
+
+def get_mask(input, local_context):
+ if not isinstance(local_context, DropoutContext):
+ dropout = local_context
+ mask = None
+ else:
+ dropout = local_context.dropout
+ dropout *= local_context.scale
+ mask = local_context.mask if local_context.reuse_mask else None
+
+ if dropout > 0 and mask is None:
+ mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
+
+ if isinstance(local_context, DropoutContext):
+ if local_context.mask is None:
+ local_context.mask = mask
+
+ return mask, dropout
+
+
+class XDropout(torch.autograd.Function):
+ """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
+
+ @staticmethod
+ def forward(ctx, input, local_ctx):
+ mask, dropout = get_mask(input, local_ctx)
+ ctx.scale = 1.0 / (1 - dropout)
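+        # surviving activations are rescaled by 1 / (1 - p), e.g. by 1/0.9 for p = 0.1,
+        # so the expected value of the output matches the input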
+ if dropout > 0:
+ ctx.save_for_backward(mask)
+ return input.masked_fill(mask, 0) * ctx.scale
+ else:
+ return input
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ if ctx.scale > 1:
+ (mask,) = ctx.saved_tensors
+ return grad_output.masked_fill(mask, 0) * ctx.scale, None
+ else:
+ return grad_output, None
+
+ @staticmethod
+ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
+ from torch.onnx import symbolic_opset12
+
+ dropout_p = local_ctx
+ if isinstance(local_ctx, DropoutContext):
+ dropout_p = local_ctx.dropout
+ # StableDropout only calls this function when training.
+ train = True
+ # TODO: We should check if the opset_version being used to export
+ # is > 12 here, but there's no good way to do that. As-is, if the
+ # opset_version < 12, export will fail with a CheckerError.
+ # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like:
+ # if opset_version < 12:
+ # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train)
+ return symbolic_opset12.dropout(g, input, dropout_p, train)
+
+
+class StableDropout(nn.Module):
+ """
+    Optimized dropout module that helps stabilize training
+
+ Args:
+        drop_prob (float): the dropout probability
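+
+    Example (illustrative sketch; in eval mode the module is a no-op, while in training mode it applies the
+    mask-based `XDropout` function):
+
+    ```python
+    >>> import torch
+    >>> from transformers.models.deberta.modeling_deberta import StableDropout
+
+    >>> drop = StableDropout(0.1)
+    >>> _ = drop.eval()  # outside training, the input is returned unchanged
+    >>> x = torch.randn(2, 4)
+    >>> torch.equal(drop(x), x)
+    True
+    ```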
+ """
+
+ def __init__(self, drop_prob):
+ super().__init__()
+ self.drop_prob = drop_prob
+ self.count = 0
+ self.context_stack = None
+
+ def forward(self, x):
+ """
+ Call the module
+
+ Args:
+            x (`torch.tensor`): The input tensor to which dropout is applied
+ """
+ if self.training and self.drop_prob > 0:
+ return XDropout.apply(x, self.get_context())
+ return x
+
+ def clear_context(self):
+ self.count = 0
+ self.context_stack = None
+
+ def init_context(self, reuse_mask=True, scale=1):
+ if self.context_stack is None:
+ self.context_stack = []
+ self.count = 0
+ for c in self.context_stack:
+ c.reuse_mask = reuse_mask
+ c.scale = scale
+
+ def get_context(self):
+ if self.context_stack is not None:
+ if self.count >= len(self.context_stack):
+ self.context_stack.append(DropoutContext())
+ ctx = self.context_stack[self.count]
+ ctx.dropout = self.drop_prob
+ self.count += 1
+ return ctx
+ else:
+ return self.drop_prob
+
+
+class DebertaLayerNorm(nn.Module):
+ """LayerNorm module in the TF style (epsilon inside the square root)."""
+
+ def __init__(self, size, eps=1e-12):
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(size))
+ self.bias = nn.Parameter(torch.zeros(size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_type = hidden_states.dtype
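+        # normalization statistics are computed in float32 for numerical stability and the
+        # result is cast back to the original dtype before the affine transform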
+ hidden_states = hidden_states.float()
+ mean = hidden_states.mean(-1, keepdim=True)
+ variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
+ hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)
+ hidden_states = hidden_states.to(input_type)
+ y = self.weight * hidden_states + self.bias
+ return y
+
+
+class DebertaSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class DebertaAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = DisentangledSelfAttention(config)
+ self.output = DebertaSelfOutput(config)
+ self.config = config
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions=False,
+ query_states=None,
+ relative_pos=None,
+ rel_embeddings=None,
+ ):
+ self_output = self.self(
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ )
+ if output_attentions:
+ self_output, att_matrix = self_output
+ if query_states is None:
+ query_states = hidden_states
+ attention_output = self.output(self_output, query_states)
+
+ if output_attentions:
+ return (attention_output, att_matrix)
+ else:
+ return attention_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta
+class DebertaIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class DebertaOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class DebertaLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = DebertaAttention(config)
+ self.intermediate = DebertaIntermediate(config)
+ self.output = DebertaOutput(config)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ query_states=None,
+ relative_pos=None,
+ rel_embeddings=None,
+ output_attentions=False,
+ ):
+ attention_output = self.attention(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ )
+ if output_attentions:
+ attention_output, att_matrix = attention_output
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ if output_attentions:
+ return (layer_output, att_matrix)
+ else:
+ return layer_output
+
+
+class DebertaEncoder(nn.Module):
+ """Modified BertEncoder with relative position bias support"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])
+ self.relative_attention = getattr(config, "relative_attention", False)
+ if self.relative_attention:
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+ self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)
+ self.gradient_checkpointing = False
+
+ def get_rel_embedding(self):
+ rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
+ return rel_embeddings
+
+ def get_attention_mask(self, attention_mask):
+ if attention_mask.dim() <= 2:
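+            # a 2D padding mask of shape [batch, seq] is expanded to [batch, 1, seq, seq],
+            # where entry [b, 0, i, j] = mask[b, i] * mask[b, j], i.e. token i may attend to
+            # token j only when neither position is padding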
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+ attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
+ elif attention_mask.dim() == 3:
+ attention_mask = attention_mask.unsqueeze(1)
+
+ return attention_mask
+
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
+ if self.relative_attention and relative_pos is None:
+ q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
+ relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)
+ return relative_pos
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ output_hidden_states=True,
+ output_attentions=False,
+ query_states=None,
+ relative_pos=None,
+ return_dict=True,
+ ):
+ attention_mask = self.get_attention_mask(attention_mask)
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
+
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ if isinstance(hidden_states, Sequence):
+ next_kv = hidden_states[0]
+ else:
+ next_kv = hidden_states
+ rel_embeddings = self.get_rel_embedding()
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ hidden_states = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ next_kv,
+ attention_mask,
+ query_states,
+ relative_pos,
+ rel_embeddings,
+ output_attentions,
+ )
+ else:
+ hidden_states = layer_module(
+ next_kv,
+ attention_mask,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ hidden_states, att_m = hidden_states
+
+ if query_states is not None:
+ query_states = hidden_states
+ if isinstance(hidden_states, Sequence):
+ next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
+ else:
+ next_kv = hidden_states
+
+ if output_attentions:
+ all_attentions = all_attentions + (att_m,)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+def build_relative_position(query_size, key_size, device):
+ """
+ Build relative position according to the query and key
+
+    We assume the absolute position of the query \\(P_q\\) ranges over (0, query_size) and the absolute position of the
+    key \\(P_k\\) ranges over (0, key_size). The relative position from query to key is \\(R_{q \\rightarrow k} = P_q -
+    P_k\\).
+
+ Args:
+ query_size (int): the length of query
+ key_size (int): the length of key
+
+ Return:
+ `torch.LongTensor`: A tensor with shape [1, query_size, key_size]
+
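+    Example (illustrative):
+
+    ```python
+    >>> import torch
+    >>> from transformers.models.deberta.modeling_deberta import build_relative_position
+
+    >>> build_relative_position(2, 3, torch.device("cpu"))
+    tensor([[[ 0, -1, -2],
+             [ 1,  0, -1]]])
+    ```
+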
+ """
+
+ q_ids = torch.arange(query_size, dtype=torch.long, device=device)
+ k_ids = torch.arange(key_size, dtype=torch.long, device=device)
+ rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)
+ rel_pos_ids = rel_pos_ids[:query_size, :]
+ rel_pos_ids = rel_pos_ids.unsqueeze(0)
+ return rel_pos_ids
+
+
+@torch.jit.script
+def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
+
+
+@torch.jit.script
+def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
+
+
+@torch.jit.script
+def pos_dynamic_expand(pos_index, p2c_att, key_layer):
+ return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
+
+
+class DisentangledSelfAttention(nn.Module):
+ """
+ Disentangled self-attention module
+
+ Parameters:
+        config (`DebertaConfig`):
+            A model config class instance with the configuration to build a new model. The schema is similar to
+            *BertConfig*; for more details, please refer to [`DebertaConfig`].
+
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
+ self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
+ self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
+
+ self.relative_attention = getattr(config, "relative_attention", False)
+ self.talking_head = getattr(config, "talking_head", False)
+
+ if self.talking_head:
+ self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
+ self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
+
+ if self.relative_attention:
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+ self.pos_dropout = StableDropout(config.hidden_dropout_prob)
+
+ if "c2p" in self.pos_att_type:
+ self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+ if "p2c" in self.pos_att_type:
+ self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = StableDropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
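+        # reshape [batch, seq, all_head_size] -> [batch, num_attention_heads, seq, head_size]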
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions=False,
+ query_states=None,
+ relative_pos=None,
+ rel_embeddings=None,
+ ):
+ """
+ Call the module
+
+ Args:
+            hidden_states (`torch.FloatTensor`):
+                Input states to the module, usually the output from the previous layer; they serve as the Q, K and V in
+                *Attention(Q, K, V)*.
+
+            attention_mask (`torch.BoolTensor`):
+                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size and *N* is the maximum
+                sequence length, in which element [i, j] = *1* means the *i*-th token in the input can attend to the
+                *j*-th token.
+
+            output_attentions (`bool`, optional):
+                Whether to return the attention matrix.
+
+ query_states (`torch.FloatTensor`, optional):
+ The *Q* state in *Attention(Q,K,V)*.
+
+ relative_pos (`torch.LongTensor`):
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
+
+ rel_embeddings (`torch.FloatTensor`):
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
+ \\text{max_relative_positions}\\), *hidden_size*].
+
+
+ """
+ if query_states is None:
+ qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)
+ query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)
+ else:
+
+ def linear(w, b, x):
+ if b is not None:
+ return torch.matmul(x, w.t()) + b.t()
+ else:
+ return torch.matmul(x, w.t()) # + b.t()
+
+ ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)
+ qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]
+ qkvb = [None] * 3
+
+ q = linear(qkvw[0], qkvb[0], query_states.to(dtype=qkvw[0].dtype))
+ k, v = [linear(qkvw[i], qkvb[i], hidden_states.to(dtype=qkvw[i].dtype)) for i in range(1, 3)]
+ query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]
+
+ query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
+ value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
+
+ rel_att = None
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ scale_factor = 1 + len(self.pos_att_type)
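+        # the scores are scaled by sqrt(head_dim * scale_factor), where scale_factor counts the
+        # content-to-content term plus each enabled relative-attention term ("c2p", "p2c")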
+ scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
+ query_layer = query_layer / scale.to(dtype=query_layer.dtype)
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ if self.relative_attention:
+ rel_embeddings = self.pos_dropout(rel_embeddings)
+ rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
+
+ if rel_att is not None:
+ attention_scores = attention_scores + rel_att
+
+ # bxhxlxd
+ if self.talking_head:
+ attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+
+ attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
+ attention_probs = self.dropout(attention_probs)
+ if self.talking_head:
+ attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (-1,)
+ context_layer = context_layer.view(new_context_layer_shape)
+ if output_attentions:
+ return (context_layer, attention_probs)
+ else:
+ return context_layer
+
+ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
+ if relative_pos is None:
+ q = query_layer.size(-2)
+ relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)
+ if relative_pos.dim() == 2:
+ relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
+ elif relative_pos.dim() == 3:
+ relative_pos = relative_pos.unsqueeze(1)
+ # bxhxqxk
+ elif relative_pos.dim() != 4:
+ raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
+
+ att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)
+ relative_pos = relative_pos.long().to(query_layer.device)
+ rel_embeddings = rel_embeddings[
+ self.max_relative_positions - att_span : self.max_relative_positions + att_span, :
+ ].unsqueeze(0)
+
+ score = 0
+
+ # content->position
+ if "c2p" in self.pos_att_type:
+ pos_key_layer = self.pos_proj(rel_embeddings)
+ pos_key_layer = self.transpose_for_scores(pos_key_layer)
+ c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))
+ c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
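+            # relative distances are shifted by att_span and clamped into bucket indices
+            # [0, 2 * att_span - 1] so they can be used to gather the matching c2p scores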
+ c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))
+ score += c2p_att
+
+ # position->content
+ if "p2c" in self.pos_att_type:
+ pos_query_layer = self.pos_q_proj(rel_embeddings)
+ pos_query_layer = self.transpose_for_scores(pos_query_layer)
+ pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
+ if query_layer.size(-2) != key_layer.size(-2):
+ r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)
+ else:
+ r_pos = relative_pos
+ p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
+ p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype))
+ p2c_att = torch.gather(
+ p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)
+ ).transpose(-1, -2)
+
+ if query_layer.size(-2) != key_layer.size(-2):
+ pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)
+ p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))
+ score += p2c_att
+
+ return score
+
+
+class DebertaEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ pad_token_id = getattr(config, "pad_token_id", 0)
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+ self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
+
+ self.position_biased_input = getattr(config, "position_biased_input", True)
+ if not self.position_biased_input:
+ self.position_embeddings = None
+ else:
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
+
+ if config.type_vocab_size > 0:
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
+
+ if self.embedding_size != config.hidden_size:
+ self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
+ self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.hidden_dropout_prob)
+ self.config = config
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ if self.position_embeddings is not None:
+ position_embeddings = self.position_embeddings(position_ids.long())
+ else:
+ position_embeddings = torch.zeros_like(inputs_embeds)
+
+ embeddings = inputs_embeds
+ if self.position_biased_input:
+ embeddings += position_embeddings
+ if self.config.type_vocab_size > 0:
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+ embeddings += token_type_embeddings
+
+ if self.embedding_size != self.config.hidden_size:
+ embeddings = self.embed_proj(embeddings)
+
+ embeddings = self.LayerNorm(embeddings)
+
+ if mask is not None:
+ if mask.dim() != embeddings.dim():
+ if mask.dim() == 4:
+ mask = mask.squeeze(1).squeeze(1)
+ mask = mask.unsqueeze(2)
+ mask = mask.to(embeddings.dtype)
+
+ embeddings = embeddings * mask
+
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class DebertaPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DebertaConfig
+ base_model_prefix = "deberta"
+ _keys_to_ignore_on_load_unexpected = ["position_embeddings"]
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+DEBERTA_START_DOCSTRING = r"""
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
+    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
+    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those
+    two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+
+ Parameters:
+ config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DEBERTA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
+ DEBERTA_START_DOCSTRING,
+)
+class DebertaModel(DebertaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.embeddings = DebertaEmbeddings(config)
+ self.encoder = DebertaEncoder(config)
+ self.z_steps = 0
+ self.config = config
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, new_embeddings):
+ self.embeddings.word_embeddings = new_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ raise NotImplementedError("The prune function is not implemented in DeBERTa model.")
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ )
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask,
+ output_hidden_states=True,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+ encoded_layers = encoder_outputs[1]
+
+ if self.z_steps > 1:
+ hidden_states = encoded_layers[-2]
+ layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
+ query_states = encoded_layers[-1]
+ rel_embeddings = self.encoder.get_rel_embedding()
+ attention_mask = self.encoder.get_attention_mask(attention_mask)
+ rel_pos = self.encoder.get_rel_pos(embedding_output)
+ for layer in layers[1:]:
+ query_states = layer(
+ hidden_states,
+ attention_mask,
+ output_attentions=False,
+ query_states=query_states,
+ relative_pos=rel_pos,
+ rel_embeddings=rel_embeddings,
+ )
+ encoded_layers.append(query_states)
+
+ sequence_output = encoded_layers[-1]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
+class DebertaForMaskedLM(DebertaPreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.deberta = DebertaModel(config)
+ self.cls = DebertaOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_MASKED_LM,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ mask="[MASK]",
+ expected_output=_MASKED_LM_EXPECTED_OUTPUT,
+ expected_loss=_MASKED_LM_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+ the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class DebertaPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+
+ self.dense = nn.Linear(config.hidden_size, self.embedding_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+class DebertaLMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = DebertaPredictionHeadTransform(config)
+
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)
+
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
+class DebertaOnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = DebertaLMPredictionHead(config)
+
+ def forward(self, sequence_output):
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class DebertaForSequenceClassification(DebertaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ num_labels = getattr(config, "num_labels", 2)
+ self.num_labels = num_labels
+
+ self.deberta = DebertaModel(config)
+ self.pooler = ContextPooler(config)
+ output_dim = self.pooler.output_dim
+
+ self.classifier = nn.Linear(output_dim, num_labels)
+ drop_out = getattr(config, "cls_dropout", None)
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
+ self.dropout = StableDropout(drop_out)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.deberta.get_input_embeddings()
+
+ def set_input_embeddings(self, new_embeddings):
+ self.deberta.set_input_embeddings(new_embeddings)
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deberta(
+ input_ids,
+ token_type_ids=token_type_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ encoder_layer = outputs[0]
+ pooled_output = self.pooler(encoder_layer)
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ # regression task
+ loss_fn = nn.MSELoss()
+ logits = logits.view(-1).to(labels.dtype)
+ loss = loss_fn(logits, labels.view(-1))
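+ # Note: when `problem_type` is not set, examples whose label is negative are treated as unlabeled and are
+ # excluded from the cross-entropy loss; if no example in the batch carries a valid label, the loss is 0.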
+ elif labels.dim() == 1 or labels.size(-1) == 1:
+ label_index = (labels >= 0).nonzero()
+ labels = labels.long()
+ if label_index.size(0) > 0:
+ labeled_logits = torch.gather(
+ logits, 0, label_index.expand(label_index.size(0), logits.size(1))
+ )
+ labels = torch.gather(labels, 0, label_index.view(-1))
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
+ else:
+ loss = torch.tensor(0).to(logits)
+ else:
+ log_softmax = nn.LogSoftmax(-1)
+ loss = -((log_softmax(logits) * labels).sum(-1)).mean()
+ elif self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class DebertaForTokenClassification(DebertaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.deberta = DebertaModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class DebertaForQuestionAnswering(DebertaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.deberta = DebertaModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_QA,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_QA_EXPECTED_OUTPUT,
+ expected_loss=_QA_EXPECTED_LOSS,
+ qa_target_start_index=_QA_TARGET_START_INDEX,
+ qa_target_end_index=_QA_TARGET_END_INDEX,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, split add a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.py b/venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cef6a50c873f438cd894b8231d890858ada44c2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.py
@@ -0,0 +1,1644 @@
+# coding=utf-8
+# Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 DeBERTa model."""
+
+
+from __future__ import annotations
+
+import math
+from typing import Dict, Optional, Sequence, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFMaskedLMOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_deberta import DebertaConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+_CONFIG_FOR_DOC = "DebertaConfig"
+_CHECKPOINT_FOR_DOC = "kamalkraj/deberta-base"
+
+
+from ..deprecated._archive_maps import TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TFDebertaContextPooler(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(config.pooler_hidden_size, name="dense")
+ self.dropout = TFDebertaStableDropout(config.pooler_dropout, name="dropout")
+ self.config = config
+
+ def call(self, hidden_states, training: bool = False):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ context_token = hidden_states[:, 0]
+ context_token = self.dropout(context_token, training=training)
+ pooled_output = self.dense(context_token)
+ pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
+ return pooled_output
+
+ @property
+ def output_dim(self) -> int:
+ return self.config.hidden_size
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.pooler_hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+
+
+class TFDebertaXSoftmax(keras.layers.Layer):
+ """
+ Masked Softmax which is optimized for saving memory
+
+ Args:
+ input (`tf.Tensor`): The input tensor to which softmax is applied.
+ mask (`tf.Tensor`): The mask matrix, where 0 indicates that the element is ignored in the softmax calculation.
+ dim (int): The dimension along which softmax is applied.
+ """
+
+ def __init__(self, axis=-1, **kwargs):
+ super().__init__(**kwargs)
+ self.axis = axis
+
+ def call(self, inputs: tf.Tensor, mask: tf.Tensor):
+ rmask = tf.logical_not(tf.cast(mask, tf.bool))
+ output = tf.where(rmask, float("-inf"), inputs)
+ output = stable_softmax(output, self.axis)
+ output = tf.where(rmask, 0.0, output)
+ return output
+
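+# Illustrative example (not part of the original module): with inputs [[1.0, 2.0, 3.0]] and mask [[1, 1, 0]],
+# TFDebertaXSoftmax(axis=-1) softmaxes over the first two positions only and returns roughly
+# [[0.2689, 0.7311, 0.0]]: masked positions are set to -inf before the softmax and reset to 0 afterwards.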
+
+class TFDebertaStableDropout(keras.layers.Layer):
+ """
+ Optimized dropout module for stabilizing the training
+
+ Args:
+ drop_prob (float): the dropout probability
+ """
+
+ def __init__(self, drop_prob, **kwargs):
+ super().__init__(**kwargs)
+ self.drop_prob = drop_prob
+
+ @tf.custom_gradient
+ def xdropout(self, inputs):
+ """
+ Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/(1 - drop_prob).
+ """
+ mask = tf.cast(
+ 1
+ - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
+ tf.bool,
+ )
+ scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32)
+ if self.drop_prob > 0:
+ inputs = tf.where(mask, 0.0, inputs) * scale
+
+ def grad(upstream):
+ if self.drop_prob > 0:
+ return tf.where(mask, 0.0, upstream) * scale
+ else:
+ return upstream
+
+ return inputs, grad
+
+ def call(self, inputs: tf.Tensor, training: tf.Tensor = False):
+ if training:
+ return self.xdropout(inputs)
+ return inputs
+
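+# Illustrative note (not part of the original module): during training with e.g. drop_prob=0.1, each surviving
+# activation is scaled by 1 / (1 - 0.1), i.e. about 1.11, so the expected magnitude is preserved; when `training`
+# is False the layer is the identity.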
+
+class TFDebertaLayerNorm(keras.layers.Layer):
+ """LayerNorm module in the TF style (epsilon inside the square root)."""
+
+ def __init__(self, size, eps=1e-12, **kwargs):
+ super().__init__(**kwargs)
+ self.size = size
+ self.eps = eps
+
+ def build(self, input_shape):
+ self.gamma = self.add_weight(shape=[self.size], initializer=tf.ones_initializer(), name="weight")
+ self.beta = self.add_weight(shape=[self.size], initializer=tf.zeros_initializer(), name="bias")
+ return super().build(input_shape)
+
+ def call(self, x: tf.Tensor) -> tf.Tensor:
+ mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
+ variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
+ std = tf.math.sqrt(variance + self.eps)
+ return self.gamma * (x - mean) / std + self.beta
+
+
+class TFDebertaSelfOutput(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(config.hidden_size, name="dense")
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
+ self.config = config
+
+ def call(self, hidden_states, input_tensor, training: bool = False):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+
+
+class TFDebertaAttention(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.self = TFDebertaDisentangledSelfAttention(config, name="self")
+ self.dense_output = TFDebertaSelfOutput(config, name="output")
+ self.config = config
+
+ def call(
+ self,
+ input_tensor: tf.Tensor,
+ attention_mask: tf.Tensor,
+ query_states: tf.Tensor = None,
+ relative_pos: tf.Tensor = None,
+ rel_embeddings: tf.Tensor = None,
+ output_attentions: bool = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ self_outputs = self.self(
+ hidden_states=input_tensor,
+ attention_mask=attention_mask,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ if query_states is None:
+ query_states = input_tensor
+ attention_output = self.dense_output(
+ hidden_states=self_outputs[0], input_tensor=query_states, training=training
+ )
+
+ output = (attention_output,) + self_outputs[1:]
+
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self", None) is not None:
+ with tf.name_scope(self.self.name):
+ self.self.build(None)
+ if getattr(self, "dense_output", None) is not None:
+ with tf.name_scope(self.dense_output.name):
+ self.dense_output.build(None)
+
+
+class TFDebertaIntermediate(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFDebertaOutput(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+
+
+class TFDebertaLayer(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFDebertaAttention(config, name="attention")
+ self.intermediate = TFDebertaIntermediate(config, name="intermediate")
+ self.bert_output = TFDebertaOutput(config, name="output")
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ query_states: tf.Tensor = None,
+ relative_pos: tf.Tensor = None,
+ rel_embeddings: tf.Tensor = None,
+ output_attentions: bool = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ attention_outputs = self.attention(
+ input_tensor=hidden_states,
+ attention_mask=attention_mask,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = attention_outputs[0]
+ intermediate_output = self.intermediate(hidden_states=attention_output)
+ layer_output = self.bert_output(
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
+ )
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "bert_output", None) is not None:
+ with tf.name_scope(self.bert_output.name):
+ self.bert_output.build(None)
+
+
+class TFDebertaEncoder(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.layer = [TFDebertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+ self.relative_attention = getattr(config, "relative_attention", False)
+ self.config = config
+ if self.relative_attention:
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if self.relative_attention:
+ self.rel_embeddings = self.add_weight(
+ name="rel_embeddings.weight",
+ shape=[self.max_relative_positions * 2, self.config.hidden_size],
+ initializer=get_initializer(self.config.initializer_range),
+ )
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+ def get_rel_embedding(self):
+ rel_embeddings = self.rel_embeddings if self.relative_attention else None
+ return rel_embeddings
+
+ def get_attention_mask(self, attention_mask):
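+ # Descriptive note: a 2D padding mask of shape [batch, seq_len] is expanded into a [batch, 1, seq_len, seq_len]
+ # pairwise mask whose entry (i, j) is 1 only when both token i and token j are real (non-padding) tokens; a 3D
+ # mask just gains a broadcastable head dimension.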
+ if len(shape_list(attention_mask)) <= 2:
+ extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
+ attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
+ attention_mask = tf.cast(attention_mask, tf.uint8)
+ elif len(shape_list(attention_mask)) == 3:
+ attention_mask = tf.expand_dims(attention_mask, 1)
+
+ return attention_mask
+
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
+ if self.relative_attention and relative_pos is None:
+ q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
+ relative_pos = build_relative_position(q, shape_list(hidden_states)[-2])
+ return relative_pos
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ query_states: tf.Tensor = None,
+ relative_pos: tf.Tensor = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ attention_mask = self.get_attention_mask(attention_mask)
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
+
+ if isinstance(hidden_states, Sequence):
+ next_kv = hidden_states[0]
+ else:
+ next_kv = hidden_states
+
+ rel_embeddings = self.get_rel_embedding()
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states=next_kv,
+ attention_mask=attention_mask,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if query_states is not None:
+ query_states = hidden_states
+ if isinstance(hidden_states, Sequence):
+ next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
+ else:
+ next_kv = hidden_states
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+def build_relative_position(query_size, key_size):
+ """
+ Build relative position according to the query and key
+
+ We assume the absolute positions of the query \\(P_q\\) range over (0, query_size) and the absolute positions of
+ the key \\(P_k\\) range over (0, key_size). The relative position from query to key is \\(R_{q \\rightarrow k} =
+ P_q - P_k\\).
+
+ Args:
+ query_size (int): the length of query
+ key_size (int): the length of key
+
+ Return:
+ `tf.Tensor`: A tensor with shape [1, query_size, key_size]
+
+ """
+ q_ids = tf.range(query_size, dtype=tf.int32)
+ k_ids = tf.range(key_size, dtype=tf.int32)
+ rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1])
+ rel_pos_ids = rel_pos_ids[:query_size, :]
+ rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
+ return tf.cast(rel_pos_ids, tf.int64)
+
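+# Illustrative example (not part of the original module): build_relative_position(3, 4) returns an int64 tensor
+# of shape [1, 3, 4] whose entry [0, q, k] equals q - k:
+#   [[[ 0, -1, -2, -3],
+#     [ 1,  0, -1, -2],
+#     [ 2,  1,  0, -1]]]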
+
+def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
+ shapes = [
+ shape_list(query_layer)[0],
+ shape_list(query_layer)[1],
+ shape_list(query_layer)[2],
+ shape_list(relative_pos)[-1],
+ ]
+ return tf.broadcast_to(c2p_pos, shapes)
+
+
+def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
+ shapes = [
+ shape_list(query_layer)[0],
+ shape_list(query_layer)[1],
+ shape_list(key_layer)[-2],
+ shape_list(key_layer)[-2],
+ ]
+ return tf.broadcast_to(c2p_pos, shapes)
+
+
+def pos_dynamic_expand(pos_index, p2c_att, key_layer):
+ shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
+ return tf.broadcast_to(pos_index, shapes)
+
+
+def torch_gather(x, indices, gather_axis):
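+ # Mimics `torch.gather(x, dim=gather_axis, index=indices)`: the gather axis is rotated to the last position,
+ # a batched `tf.gather` picks one element of `x` per entry of `indices` along that axis, and the rotation is
+ # undone so the result has the same shape as `indices`.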
+ if gather_axis < 0:
+ gather_axis = tf.rank(x) + gather_axis
+
+ if gather_axis != tf.rank(x) - 1:
+ pre_roll = tf.rank(x) - 1 - gather_axis
+ permutation = tf.roll(tf.range(tf.rank(x)), pre_roll, axis=0)
+ x = tf.transpose(x, perm=permutation)
+ indices = tf.transpose(indices, perm=permutation)
+ else:
+ pre_roll = 0
+
+ flat_x = tf.reshape(x, (-1, tf.shape(x)[-1]))
+ flat_indices = tf.reshape(indices, (-1, tf.shape(indices)[-1]))
+ gathered = tf.gather(flat_x, flat_indices, batch_dims=1)
+ gathered = tf.reshape(gathered, tf.shape(indices))
+
+ if pre_roll != 0:
+ permutation = tf.roll(tf.range(tf.rank(x)), -pre_roll, axis=0)
+ gathered = tf.transpose(gathered, perm=permutation)
+
+ return gathered
+
+
+class TFDebertaDisentangledSelfAttention(keras.layers.Layer):
+ """
+ Disentangled self-attention module
+
+ Parameters:
+ config (`DebertaConfig`):
+ A model config class instance with the configuration to build a new model. The schema is similar to
+ *BertConfig*; for more details, please refer to [`DebertaConfig`].
+
+ """
+
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.in_proj = keras.layers.Dense(
+ self.all_head_size * 3,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="in_proj",
+ use_bias=False,
+ )
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
+
+ self.relative_attention = getattr(config, "relative_attention", False)
+ self.talking_head = getattr(config, "talking_head", False)
+
+ if self.talking_head:
+ self.head_logits_proj = keras.layers.Dense(
+ self.num_attention_heads,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="head_logits_proj",
+ use_bias=False,
+ )
+ self.head_weights_proj = keras.layers.Dense(
+ self.num_attention_heads,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="head_weights_proj",
+ use_bias=False,
+ )
+
+ self.softmax = TFDebertaXSoftmax(axis=-1)
+
+ if self.relative_attention:
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+ self.pos_dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="pos_dropout")
+ if "c2p" in self.pos_att_type:
+ self.pos_proj = keras.layers.Dense(
+ self.all_head_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="pos_proj",
+ use_bias=False,
+ )
+ if "p2c" in self.pos_att_type:
+ self.pos_q_proj = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="pos_q_proj"
+ )
+
+ self.dropout = TFDebertaStableDropout(config.attention_probs_dropout_prob, name="dropout")
+ self.config = config
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ self.q_bias = self.add_weight(
+ name="q_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros()
+ )
+ self.v_bias = self.add_weight(
+ name="v_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros()
+ )
+ if getattr(self, "in_proj", None) is not None:
+ with tf.name_scope(self.in_proj.name):
+ self.in_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "head_logits_proj", None) is not None:
+ with tf.name_scope(self.head_logits_proj.name):
+ self.head_logits_proj.build(None)
+ if getattr(self, "head_weights_proj", None) is not None:
+ with tf.name_scope(self.head_weights_proj.name):
+ self.head_weights_proj.build(None)
+ if getattr(self, "pos_dropout", None) is not None:
+ with tf.name_scope(self.pos_dropout.name):
+ self.pos_dropout.build(None)
+ if getattr(self, "pos_proj", None) is not None:
+ with tf.name_scope(self.pos_proj.name):
+ self.pos_proj.build([self.config.hidden_size])
+ if getattr(self, "pos_q_proj", None) is not None:
+ with tf.name_scope(self.pos_q_proj.name):
+ self.pos_q_proj.build([self.config.hidden_size])
+
+ def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor:
+ shape = shape_list(tensor)[:-1] + [self.num_attention_heads, -1]
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ tensor = tf.reshape(tensor=tensor, shape=shape)
+
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ query_states: tf.Tensor = None,
+ relative_pos: tf.Tensor = None,
+ rel_embeddings: tf.Tensor = None,
+ output_attentions: bool = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ """
+ Call the module
+
+ Args:
+ hidden_states (`tf.Tensor`):
+ Input states to the module, usually the output from the previous layer; they will be the Q, K and V in
+ *Attention(Q, K, V)*.
+
+ attention_mask (`tf.Tensor`):
+ An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
+ sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
+ th token.
+
+ output_attentions (`bool`, optional):
+ Whether to return the attention matrix.
+
+ query_states (`tf.Tensor`, optional):
+ The *Q* state in *Attention(Q,K,V)*.
+
+ relative_pos (`tf.Tensor`):
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
+
+ rel_embeddings (`tf.Tensor`):
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
+ \\text{max_relative_positions}\\), *hidden_size*].
+
+
+ """
+ if query_states is None:
+ qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)
+ query_layer, key_layer, value_layer = tf.split(
+ self.transpose_for_scores(qp), num_or_size_splits=3, axis=-1
+ )
+ else:
+
+ def linear(w, b, x):
+ out = tf.matmul(x, w, transpose_b=True)
+ if b is not None:
+ out += tf.transpose(b)
+ return out
+
+ ws = tf.split(
+ tf.transpose(self.in_proj.weight[0]), num_or_size_splits=self.num_attention_heads * 3, axis=0
+ )
+ qkvw = tf.TensorArray(dtype=tf.float32, size=3)
+ for k in tf.range(3):
+ qkvw_inside = tf.TensorArray(dtype=tf.float32, size=self.num_attention_heads)
+ for i in tf.range(self.num_attention_heads):
+ qkvw_inside = qkvw_inside.write(i, ws[i * 3 + k])
+ qkvw = qkvw.write(k, qkvw_inside.concat())
+ qkvb = [None] * 3
+
+ q = linear(qkvw[0], qkvb[0], query_states)
+ k = linear(qkvw[1], qkvb[1], hidden_states)
+ v = linear(qkvw[2], qkvb[2], hidden_states)
+ query_layer = self.transpose_for_scores(q)
+ key_layer = self.transpose_for_scores(k)
+ value_layer = self.transpose_for_scores(v)
+
+ query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
+ value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
+
+ rel_att = None
+ # Take the dot product between "query" and "key" to get the raw attention scores.
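+ # Note: the final attention score is a sum of up to `1 + len(pos_att_type)` terms (content->content plus the
+ # enabled c2p/p2c biases), so the query is scaled by sqrt(head_size * scale_factor) to keep the variance of the
+ # combined score comparable to standard scaled dot-product attention.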
+ scale_factor = 1 + len(self.pos_att_type)
+ scale = math.sqrt(shape_list(query_layer)[-1] * scale_factor)
+ query_layer = query_layer / scale
+
+ attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 1, 3, 2]))
+ if self.relative_attention:
+ rel_embeddings = self.pos_dropout(rel_embeddings, training=training)
+ rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
+
+ if rel_att is not None:
+ attention_scores = attention_scores + rel_att
+
+ if self.talking_head:
+ attention_scores = tf.transpose(
+ self.head_logits_proj(tf.transpose(attention_scores, [0, 2, 3, 1])), [0, 3, 1, 2]
+ )
+
+ attention_probs = self.softmax(attention_scores, attention_mask)
+ attention_probs = self.dropout(attention_probs, training=training)
+ if self.talking_head:
+ attention_probs = tf.transpose(
+ self.head_weights_proj(tf.transpose(attention_probs, [0, 2, 3, 1])), [0, 3, 1, 2]
+ )
+
+ context_layer = tf.matmul(attention_probs, value_layer)
+ context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
+ context_layer_shape = shape_list(context_layer)
+ # Set the final dimension here explicitly.
+ # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
+ # the model in graph mode as context_layer is reshaped to (None, 7, None) and the Dense layer in TFDebertaSelfOutput
+ # requires the final input dimension to be defined
+ new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+ return outputs
+
+ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
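+ # Summary (descriptive comment): computes the relative-position bias terms of disentangled attention. "c2p"
+ # scores the token content (query) against relative-position key embeddings, "p2c" scores relative-position
+ # query embeddings against the token content (key); both are gathered at the clipped relative distances and
+ # added to the content->content attention scores by the caller.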
+ if relative_pos is None:
+ q = shape_list(query_layer)[-2]
+ relative_pos = build_relative_position(q, shape_list(key_layer)[-2])
+ shape_list_pos = shape_list(relative_pos)
+ if len(shape_list_pos) == 2:
+ relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
+ elif len(shape_list_pos) == 3:
+ relative_pos = tf.expand_dims(relative_pos, 1)
+ # bxhxqxk
+ elif len(shape_list_pos) != 4:
+ raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")
+
+ att_span = tf.cast(
+ tf.minimum(
+ tf.maximum(shape_list(query_layer)[-2], shape_list(key_layer)[-2]), self.max_relative_positions
+ ),
+ tf.int64,
+ )
+ rel_embeddings = tf.expand_dims(
+ rel_embeddings[self.max_relative_positions - att_span : self.max_relative_positions + att_span, :], 0
+ )
+
+ score = 0
+
+ # content->position
+ if "c2p" in self.pos_att_type:
+ pos_key_layer = self.pos_proj(rel_embeddings)
+ pos_key_layer = self.transpose_for_scores(pos_key_layer)
+ c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 1, 3, 2]))
+ c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
+ c2p_att = torch_gather(c2p_att, c2p_dynamic_expand(c2p_pos, query_layer, relative_pos), -1)
+ score += c2p_att
+
+ # position->content
+ if "p2c" in self.pos_att_type:
+ pos_query_layer = self.pos_q_proj(rel_embeddings)
+ pos_query_layer = self.transpose_for_scores(pos_query_layer)
+ pos_query_layer /= tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=tf.float32))
+ if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
+ r_pos = build_relative_position(shape_list(key_layer)[-2], shape_list(key_layer)[-2])
+ else:
+ r_pos = relative_pos
+ p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
+ p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 1, 3, 2]))
+ p2c_att = tf.transpose(
+ torch_gather(p2c_att, p2c_dynamic_expand(p2c_pos, query_layer, key_layer), -1), [0, 1, 3, 2]
+ )
+ if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
+ pos_index = tf.expand_dims(relative_pos[:, :, :, 0], -1)
+ p2c_att = torch_gather(p2c_att, pos_dynamic_expand(pos_index, p2c_att, key_layer), -2)
+ score += p2c_att
+
+ return score
+
+
+class TFDebertaEmbeddings(keras.layers.Layer):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+ self.hidden_size = config.hidden_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.position_biased_input = getattr(config, "position_biased_input", True)
+ self.initializer_range = config.initializer_range
+ if self.embedding_size != config.hidden_size:
+ self.embed_proj = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="embed_proj",
+ use_bias=False,
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.embedding_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("token_type_embeddings"):
+ if self.config.type_vocab_size > 0:
+ self.token_type_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.config.type_vocab_size, self.embedding_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+ else:
+ self.token_type_embeddings = None
+
+ with tf.name_scope("position_embeddings"):
+ if self.position_biased_input:
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+ else:
+ self.position_embeddings = None
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "embed_proj", None) is not None:
+ with tf.name_scope(self.embed_proj.name):
+ self.embed_proj.build([None, None, self.embedding_size])
+
+ def call(
+ self,
+ input_ids: tf.Tensor = None,
+ position_ids: tf.Tensor = None,
+ token_type_ids: tf.Tensor = None,
+ inputs_embeds: tf.Tensor = None,
+ mask: tf.Tensor = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ """
+ Applies embedding based on the input tensors.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+ final_embeddings = inputs_embeds
+ if self.position_biased_input:
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ final_embeddings += position_embeds
+ if self.config.type_vocab_size > 0:
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
+ final_embeddings += token_type_embeds
+
+ if self.embedding_size != self.hidden_size:
+ final_embeddings = self.embed_proj(final_embeddings)
+
+ final_embeddings = self.LayerNorm(final_embeddings)
+
+ if mask is not None:
+ if len(shape_list(mask)) != len(shape_list(final_embeddings)):
+ if len(shape_list(mask)) == 4:
+ mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
+ mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)
+
+ final_embeddings = final_embeddings * mask
+
+ final_embeddings = self.dropout(final_embeddings, training=training)
+
+ return final_embeddings
+
+
+class TFDebertaPredictionHeadTransform(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+
+ self.dense = keras.layers.Dense(
+ units=self.embedding_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="dense",
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.embedding_size])
+
+
+class TFDebertaLMPredictionHead(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+
+ self.transform = TFDebertaPredictionHeadTransform(config, name="transform")
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.input_embeddings = input_embeddings
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transform", None) is not None:
+ with tf.name_scope(self.transform.name):
+ self.transform.build(None)
+
+ def get_output_embeddings(self) -> keras.layers.Layer:
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value: tf.Variable):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self) -> Dict[str, tf.Variable]:
+ return {"bias": self.bias}
+
+ def set_bias(self, value: tf.Variable):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.transform(hidden_states=hidden_states)
+ seq_length = shape_list(hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
+
+class TFDebertaOnlyMLMHead(keras.layers.Layer):
+ def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
+ super().__init__(**kwargs)
+ self.predictions = TFDebertaLMPredictionHead(config, input_embeddings, name="predictions")
+
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
+ prediction_scores = self.predictions(hidden_states=sequence_output)
+
+ return prediction_scores
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+
+
+@keras_serializable
+class TFDebertaMainLayer(keras.layers.Layer):
+ config_class = DebertaConfig
+
+ def __init__(self, config: DebertaConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+
+ self.embeddings = TFDebertaEmbeddings(config, name="embeddings")
+ self.encoder = TFDebertaEncoder(config, name="encoder")
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.embeddings
+
+ def set_input_embeddings(self, value: tf.Variable):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class `PreTrainedModel`.
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ mask=attention_mask,
+ training=training,
+ )
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return TFBaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+
+
+class TFDebertaPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DebertaConfig
+ base_model_prefix = "deberta"
+
+
+DEBERTA_START_DOCSTRING = r"""
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
+ Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao and Weizhu Chen. It is
+ built on top of BERT/RoBERTa with two improvements, disentangled attention and an enhanced mask decoder. With
+ those two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
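+
+    For illustration, a minimal sketch of the three equivalent call styles (the tensor values below are placeholders
+    and `model` stands for any instantiated TF DeBERTa model):
+
+    ```python
+    >>> import tensorflow as tf
+
+    >>> input_ids = tf.constant([[1, 31414, 232, 2]])
+    >>> attention_mask = tf.ones_like(input_ids)
+
+    >>> outputs = model(input_ids=input_ids, attention_mask=attention_mask)  # keyword arguments
+    >>> outputs = model([input_ids, attention_mask])  # positional list, in docstring order
+    >>> outputs = model({"input_ids": input_ids, "attention_mask": attention_mask})  # dict keyed by input names
+    ```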
+
+
+
+ Parameters:
+ config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DEBERTA_INPUTS_DOCSTRING = r"""
+ Args:
+        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
+ DEBERTA_START_DOCSTRING,
+)
+class TFDebertaModel(TFDebertaPreTrainedModel):
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+
+
+@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
+class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLoss):
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `TFDebertaForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
+ self.mlm = TFDebertaOnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")
+
+ def get_lm_head(self) -> keras.layers.Layer:
+ return self.mlm.predictions
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+ if getattr(self, "mlm", None) is not None:
+ with tf.name_scope(self.mlm.name):
+ self.mlm.build(None)
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
+ self.pooler = TFDebertaContextPooler(config, name="pooler")
+
+ drop_out = getattr(config, "cls_dropout", None)
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
+ self.dropout = TFDebertaStableDropout(drop_out, name="cls_dropout")
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="classifier",
+ )
+ self.output_dim = self.pooler.output_dim
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ pooled_output = self.pooler(sequence_output, training=training)
+ pooled_output = self.dropout(pooled_output, training=training)
+ logits = self.classifier(pooled_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.output_dim])
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassificationLoss):
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(inputs=sequence_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnsweringLoss):
+ def __init__(self, config: DebertaConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.deberta = TFDebertaMainLayer(config, name="deberta")
+ self.qa_outputs = keras.layers.Dense(
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ """
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
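+        # `qa_outputs` projects each token to two scores (num_labels is expected to be 2 here), which are split into
+        # start- and end-of-span logits of shape (batch_size, sequence_length).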
+ logits = self.qa_outputs(inputs=sequence_output)
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
+ loss = None
+
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta.py b/venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta.py
new file mode 100644
index 0000000000000000000000000000000000000000..b846a7891562d6386d40f342c47211a5b53857e4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta.py
@@ -0,0 +1,393 @@
+# coding=utf-8
+# Copyright 2020 Microsoft and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization class for model DeBERTa."""
+
+import json
+import os
+from typing import List, Optional, Tuple
+
+import regex as re
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
+
+
+# Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
+def bytes_to_unicode():
+ """
+    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
+ characters the bpe code barfs on.
+
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
+ tables between utf-8 bytes and unicode strings.
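+
+    As a small illustration of the table built below, the space byte (0x20) falls outside the printable ranges, so it
+    is remapped to the visible character "Ġ" (U+0120):
+
+    ```python
+    >>> bytes_to_unicode()[ord(" ")]
+    'Ġ'
+    ```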
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
+
+# Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word.
+
+ Word is represented as tuple of symbols (symbols being variable-length strings).
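+
+    For example, `get_pairs(("h", "e", "l", "l", "o"))` returns `{("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}`.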
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+class DebertaTokenizer(PreTrainedTokenizer):
+ """
+ Construct a DeBERTa tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
+    be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
+
+ ```python
+ >>> from transformers import DebertaTokenizer
+
+ >>> tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
+ >>> tokenizer("Hello world")["input_ids"]
+ [1, 31414, 232, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [1, 20920, 232, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
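+
+    For instance (a minimal sketch; the ids follow from the example above, because `add_prefix_space=True` simply
+    prepends a space before tokenizing):
+
+    ```python
+    >>> tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base", add_prefix_space=True)
+    >>> tokenizer("Hello world")["input_ids"]
+    [1, 20920, 232, 2]
+    ```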
+
+
+
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
+
+
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The beginning of sequence token.
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The end of sequence token.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an initial space to the input. This allows treating the leading word just like any
+            other word (the DeBERTa tokenizer detects the beginning of words by the preceding space).
+        add_bos_token (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an initial beginning-of-sequence token to the input. This allows treating the
+            leading word just like any other word.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ errors="replace",
+ bos_token="[CLS]",
+ eos_token="[SEP]",
+ sep_token="[SEP]",
+ cls_token="[CLS]",
+ unk_token="[UNK]",
+ pad_token="[PAD]",
+ mask_token="[MASK]",
+ add_prefix_space=False,
+ add_bos_token=False,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
+ sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
+
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+ self.add_bos_token = add_bos_token
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+ super().__init__(
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ add_bos_token=add_bos_token,
+ **kwargs,
+ )
+
+ @property
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.vocab_size
+ def vocab_size(self):
+ return len(self.encoder)
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
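+        # Repeatedly merge the adjacent pair with the lowest merge rank until no pair in the word has a known rank.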
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A DeBERTa sequence has the following format:
+
+ - single sequence: [CLS] X [SEP]
+ - pair of sequences: [CLS] A [SEP] B [SEP]
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+ return bpe_tokens
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
+ text = " " + text
+ return (text, kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta_fast.py b/venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..07226443d30a9c0cbe3d9f970e32343dac04ca65
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta/tokenization_deberta_fast.py
@@ -0,0 +1,247 @@
+# coding=utf-8
+# Copyright 2020 Microsoft and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Fast Tokenization class for model DeBERTa."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import pre_tokenizers
+
+from ...tokenization_utils_base import AddedToken, BatchEncoding
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_deberta import DebertaTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
+
+
+class DebertaTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" DeBERTa tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
+ Byte-Pair-Encoding.
+
+    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
+    be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
+
+ ```python
+ >>> from transformers import DebertaTokenizerFast
+
+ >>> tokenizer = DebertaTokenizerFast.from_pretrained("microsoft/deberta-base")
+ >>> tokenizer("Hello world")["input_ids"]
+ [1, 31414, 232, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [1, 20920, 232, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
+ the model was not pretrained this way, it might yield a decrease in performance.
+
+
+
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
+
+
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`, *optional*):
+ Path to the vocabulary file.
+ merges_file (`str`, *optional*):
+ Path to the merges file.
+ tokenizer_file (`str`, *optional*):
+ The path to a tokenizer file to use instead of the vocab file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The beginning of sequence token.
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The end of sequence token.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an initial space to the input. This allows treating the leading word just like any
+            other word (the DeBERTa tokenizer detects the beginning of words by the preceding space).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
+ slow_tokenizer_class = DebertaTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+ errors="replace",
+ bos_token="[CLS]",
+ eos_token="[SEP]",
+ sep_token="[SEP]",
+ cls_token="[CLS]",
+ unk_token="[UNK]",
+ pad_token="[PAD]",
+ mask_token="[MASK]",
+ add_prefix_space=False,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ merges_file,
+ tokenizer_file=tokenizer_file,
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ **kwargs,
+ )
+ self.add_bos_token = kwargs.pop("add_bos_token", False)
+
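+        # If the serialized pre-tokenizer was saved with a different `add_prefix_space`, rebuild it with the value
+        # requested here so that tokenization matches the flag.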
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
+ pre_tok_state["add_prefix_space"] = add_prefix_space
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
+
+ self.add_prefix_space = add_prefix_space
+
+ @property
+ def mask_token(self) -> str:
+ """
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
+ having been set.
+
+        The DeBERTa tokenizer has a special mask token to be used in the fill-mask pipeline. The mask token will
+        greedily include the space before the *[MASK]*.
+ """
+ if self._mask_token is None:
+ if self.verbose:
+ logger.error("Using mask_token, but it is not set yet.")
+ return None
+ return str(self._mask_token)
+
+ @mask_token.setter
+ def mask_token(self, value):
+ """
+ Overriding the default behavior of the mask token to have it eat the space before it.
+ """
+ # Mask token behave like a normal word, i.e. include the space before it
+ # So we set lstrip to True
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
+ self._mask_token = value
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A DeBERTa sequence has the following format:
+
+ - single sequence: [CLS] X [SEP]
+ - pair of sequences: [CLS] A [SEP] B [SEP]
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._batch_encode_plus
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._batch_encode_plus(*args, **kwargs)
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast._encode_plus
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._encode_plus(*args, **kwargs)
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e2bb330e6800a6789729f87f8939cd50cf3f6380
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/configuration_detr.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/configuration_detr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b216a55eb3cb321f981bb715663f0e541507cc40
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/configuration_detr.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/image_processing_detr.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/image_processing_detr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c83d2e9046300b638ae4b28aa5ed66a1be591030
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/detr/__pycache__/image_processing_detr.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/encodec/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3d9488968bf2cc6316ba5eb4601e3dc3e5878b8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encodec/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
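+# The config and feature-extractor classes are always importable; the torch-only modeling classes are registered
+# further below only when torch is available, and everything is exposed lazily through `_LazyModule`.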
+_import_structure = {
+ "configuration_encodec": [
+ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "EncodecConfig",
+ ],
+ "feature_extraction_encodec": ["EncodecFeatureExtractor"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_encodec"] = [
+ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "EncodecModel",
+ "EncodecPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_encodec import (
+ ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ EncodecConfig,
+ )
+ from .feature_extraction_encodec import EncodecFeatureExtractor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_encodec import (
+ ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
+ EncodecModel,
+ EncodecPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e0d3c2d6738df6962bbf2bc3f05bfb1ee1944f0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/configuration_encodec.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/configuration_encodec.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..577a351f3a30cb8d27f6edc012f53373d3549a37
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/configuration_encodec.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/convert_encodec_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/convert_encodec_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b5cdde39e54ca4c694d06eba10e9e935701e8f4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/convert_encodec_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/feature_extraction_encodec.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/feature_extraction_encodec.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..edd1f9ee27e63a8b3983fd8dc96b4473a9328439
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/feature_extraction_encodec.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/modeling_encodec.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/modeling_encodec.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8c4738f2ee40a7167d4702d10e6cfc44cd50686
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encodec/__pycache__/modeling_encodec.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/configuration_encodec.py b/venv/lib/python3.10/site-packages/transformers/models/encodec/configuration_encodec.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e18bb178adf237b143c75e1406234ca36efadc3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encodec/configuration_encodec.py
@@ -0,0 +1,193 @@
+# coding=utf-8
+# Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" EnCodec model configuration"""
+
+
+import math
+from typing import Optional
+
+import numpy as np
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class EncodecConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of an [`EncodecModel`]. It is used to instantiate an
+    Encodec model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the
+ [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ target_bandwidths (`List[float]`, *optional*, defaults to `[1.5, 3.0, 6.0, 12.0, 24.0]`):
+            The range of different bandwidths the model can encode audio with.
+        sampling_rate (`int`, *optional*, defaults to 24000):
+            The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
+ audio_channels (`int`, *optional*, defaults to 1):
+ Number of channels in the audio data. Either 1 for mono or 2 for stereo.
+ normalize (`bool`, *optional*, defaults to `False`):
+ Whether the audio shall be normalized when passed.
+ chunk_length_s (`float`, *optional*):
+            If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
+        overlap (`float`, *optional*):
+            Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
+            formula: `int((1.0 - self.overlap) * self.chunk_length)`.
+ hidden_size (`int`, *optional*, defaults to 128):
+ Intermediate representation dimension.
+ num_filters (`int`, *optional*, defaults to 32):
+            Number of convolution kernels of the first `EncodecConv1d` downsampling layer.
+ num_residual_layers (`int`, *optional*, defaults to 1):
+ Number of residual layers.
+ upsampling_ratios (`Sequence[int]` , *optional*, defaults to `[8, 5, 4, 2]`):
+            Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it
+            will use the ratios in the reverse order of the ones specified here, which must match the decoder order.
+ norm_type (`str`, *optional*, defaults to `"weight_norm"`):
+ Normalization method. Should be in `["weight_norm", "time_group_norm"]`
+ kernel_size (`int`, *optional*, defaults to 7):
+ Kernel size for the initial convolution.
+ last_kernel_size (`int`, *optional*, defaults to 7):
+ Kernel size for the last convolution layer.
+ residual_kernel_size (`int`, *optional*, defaults to 3):
+ Kernel size for the residual layers.
+ dilation_growth_rate (`int`, *optional*, defaults to 2):
+ How much to increase the dilation with each layer.
+ use_causal_conv (`bool`, *optional*, defaults to `True`):
+ Whether to use fully causal convolution.
+ pad_mode (`str`, *optional*, defaults to `"reflect"`):
+ Padding mode for the convolutions.
+ compress (`int`, *optional*, defaults to 2):
+ Reduced dimensionality in residual branches (from Demucs v3).
+ num_lstm_layers (`int`, *optional*, defaults to 2):
+ Number of LSTM layers at the end of the encoder.
+ trim_right_ratio (`float`, *optional*, defaults to 1.0):
+ Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If
+ equal to 1.0, it means that all the trimming is done at the right.
+ codebook_size (`int`, *optional*, defaults to 1024):
+            Number of discrete codes that make up the VQVAE.
+ codebook_dim (`int`, *optional*):
+ Dimension of the codebook vectors. If not defined, uses `hidden_size`.
+ use_conv_shortcut (`bool`, *optional*, defaults to `True`):
+ Whether to use a convolutional layer as the 'skip' connection in the `EncodecResnetBlock` block. If False,
+ an identity function will be used, giving a generic residual connection.
+
+ Example:
+
+ ```python
+ >>> from transformers import EncodecModel, EncodecConfig
+
+ >>> # Initializing a "facebook/encodec_24khz" style configuration
+ >>> configuration = EncodecConfig()
+
+ >>> # Initializing a model (with random weights) from the "facebook/encodec_24khz" style configuration
+ >>> model = EncodecModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "encodec"
+
+ def __init__(
+ self,
+ target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
+ sampling_rate=24_000,
+ audio_channels=1,
+ normalize=False,
+ chunk_length_s=None,
+ overlap=None,
+ hidden_size=128,
+ num_filters=32,
+ num_residual_layers=1,
+ upsampling_ratios=[8, 5, 4, 2],
+ norm_type="weight_norm",
+ kernel_size=7,
+ last_kernel_size=7,
+ residual_kernel_size=3,
+ dilation_growth_rate=2,
+ use_causal_conv=True,
+ pad_mode="reflect",
+ compress=2,
+ num_lstm_layers=2,
+ trim_right_ratio=1.0,
+ codebook_size=1024,
+ codebook_dim=None,
+ use_conv_shortcut=True,
+ **kwargs,
+ ):
+ self.target_bandwidths = target_bandwidths
+ self.sampling_rate = sampling_rate
+ self.audio_channels = audio_channels
+ self.normalize = normalize
+ self.chunk_length_s = chunk_length_s
+ self.overlap = overlap
+ self.hidden_size = hidden_size
+ self.num_filters = num_filters
+ self.num_residual_layers = num_residual_layers
+ self.upsampling_ratios = upsampling_ratios
+ self.norm_type = norm_type
+ self.kernel_size = kernel_size
+ self.last_kernel_size = last_kernel_size
+ self.residual_kernel_size = residual_kernel_size
+ self.dilation_growth_rate = dilation_growth_rate
+ self.use_causal_conv = use_causal_conv
+ self.pad_mode = pad_mode
+ self.compress = compress
+ self.num_lstm_layers = num_lstm_layers
+ self.trim_right_ratio = trim_right_ratio
+ self.codebook_size = codebook_size
+ self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
+ self.use_conv_shortcut = use_conv_shortcut
+
+ if self.norm_type not in ["weight_norm", "time_group_norm"]:
+ raise ValueError(
+ f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
+ )
+
+ super().__init__(**kwargs)
+
+ # This is a property because you might want to change the chunk_length_s on the fly
+ @property
+ def chunk_length(self) -> Optional[int]:
+ if self.chunk_length_s is None:
+ return None
+ else:
+ return int(self.chunk_length_s * self.sampling_rate)
+
+ # This is a property because you might want to change the chunk_length_s on the fly
+ @property
+ def chunk_stride(self) -> Optional[int]:
+ if self.chunk_length_s is None or self.overlap is None:
+ return None
+ else:
+ return max(1, int((1.0 - self.overlap) * self.chunk_length))
+
+ @property
+ def frame_rate(self) -> int:
+ hop_length = np.prod(self.upsampling_ratios)
+ return math.ceil(self.sampling_rate / hop_length)
+
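+ # A worked example with the default values above (purely illustrative, nothing below depends on it):
+ # the hop length is the product of the upsampling ratios, 8 * 5 * 4 * 2 = 320 samples, so `frame_rate`
+ # evaluates to ceil(24_000 / 320) = 75 frames per second, and the largest default bandwidth of
+ # 24.0 kbps makes `num_quantizers` below evaluate to int(1000 * 24.0 // (75 * 10)) = 32 codebooks.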
+ @property
+ def num_quantizers(self) -> int:
+ return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a16a4b7ba0f3b66412e63591055c3fb2afab9ec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py
@@ -0,0 +1,365 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert EnCodec checkpoints."""
+
+import argparse
+
+import torch
+
+from transformers import (
+ EncodecConfig,
+ EncodecFeatureExtractor,
+ EncodecModel,
+ logging,
+)
+
+
+# checkpoints downloaded from:
+# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
+# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
+# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger("transformers.models.encodec")
+
+MAPPING_QUANTIZER = {
+ "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
+ "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
+ "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
+ "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
+}
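+# In MAPPING_QUANTIZER above, "*" stands for the quantizer layer index and is substituted with the
+# index parsed from the original key at conversion time; for instance (hypothetical key), an original
+# "quantizer.vq.layers.0._codebook.embed" entry would end up at "quantizer.layers.0.codebook.embed".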
+MAPPING_ENCODER = {
+ "encoder.model.0.conv.conv": "encoder.layers.0.conv",
+ "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
+ "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
+ "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
+ "encoder.model.3.conv.conv": "encoder.layers.3.conv",
+ "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
+ "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
+ "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
+ "encoder.model.6.conv.conv": "encoder.layers.6.conv",
+ "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
+ "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
+ "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
+ "encoder.model.9.conv.conv": "encoder.layers.9.conv",
+ "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
+ "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
+ "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
+ "encoder.model.12.conv.conv": "encoder.layers.12.conv",
+ "encoder.model.13.lstm": "encoder.layers.13.lstm",
+ "encoder.model.15.conv.conv": "encoder.layers.15.conv",
+}
+MAPPING_ENCODER_48K = {
+ "encoder.model.0.conv.norm": "encoder.layers.0.norm",
+ "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
+ "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
+ "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
+ "encoder.model.3.conv.norm": "encoder.layers.3.norm",
+ "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
+ "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
+ "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
+ "encoder.model.6.conv.norm": "encoder.layers.6.norm",
+ "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
+ "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
+ "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
+ "encoder.model.9.conv.norm": "encoder.layers.9.norm",
+ "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
+ "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
+ "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
+ "encoder.model.12.conv.norm": "encoder.layers.12.norm",
+ "encoder.model.15.conv.norm": "encoder.layers.15.norm",
+}
+MAPPING_DECODER = {
+ "decoder.model.0.conv.conv": "decoder.layers.0.conv",
+ "decoder.model.1.lstm": "decoder.layers.1.lstm",
+ "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
+ "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
+ "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
+ "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
+ "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
+ "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
+ "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
+ "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
+ "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
+ "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
+ "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
+ "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
+ "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
+ "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
+ "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
+ "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
+ "decoder.model.15.conv.conv": "decoder.layers.15.conv",
+}
+MAPPING_DECODER_48K = {
+ "decoder.model.0.conv.norm": "decoder.layers.0.norm",
+ "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
+ "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
+ "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
+ "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
+ "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
+ "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
+ "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
+ "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
+ "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
+ "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
+ "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
+ "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
+ "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
+ "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
+ "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
+ "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
+ "decoder.model.15.conv.norm": "decoder.layers.15.norm",
+}
+MAPPING_24K = {
+ **MAPPING_QUANTIZER,
+ **MAPPING_ENCODER,
+ **MAPPING_DECODER,
+}
+MAPPING_48K = {
+ **MAPPING_QUANTIZER,
+ **MAPPING_ENCODER,
+ **MAPPING_ENCODER_48K,
+ **MAPPING_DECODER,
+ **MAPPING_DECODER_48K,
+}
+TOP_LEVEL_KEYS = []
+IGNORE_KEYS = []
+
+
+def set_recursively(hf_pointer, key, value, full_name, weight_type):
+ for attribute in key.split("."):
+ hf_pointer = getattr(hf_pointer, attribute)
+
+ if weight_type is not None:
+ hf_shape = getattr(hf_pointer, weight_type).shape
+ else:
+ hf_shape = hf_pointer.shape
+
+ if hf_shape != value.shape:
+ raise ValueError(
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
+ f" {value.shape} for {full_name}"
+ )
+
+ if weight_type == "weight":
+ hf_pointer.weight.data = value
+ elif weight_type == "weight_g":
+ hf_pointer.weight_g.data = value
+ elif weight_type == "weight_v":
+ hf_pointer.weight_v.data = value
+ elif weight_type == "bias":
+ hf_pointer.bias.data = value
+ elif weight_type == "running_mean":
+ hf_pointer.running_mean.data = value
+ elif weight_type == "running_var":
+ hf_pointer.running_var.data = value
+ elif weight_type == "num_batches_tracked":
+ hf_pointer.num_batches_tracked.data = value
+ elif weight_type == "weight_ih_l0":
+ hf_pointer.weight_ih_l0.data = value
+ elif weight_type == "weight_hh_l0":
+ hf_pointer.weight_hh_l0.data = value
+ elif weight_type == "bias_ih_l0":
+ hf_pointer.bias_ih_l0.data = value
+ elif weight_type == "bias_hh_l0":
+ hf_pointer.bias_hh_l0.data = value
+ elif weight_type == "weight_ih_l1":
+ hf_pointer.weight_ih_l1.data = value
+ elif weight_type == "weight_hh_l1":
+ hf_pointer.weight_hh_l1.data = value
+ elif weight_type == "bias_ih_l1":
+ hf_pointer.bias_ih_l1.data = value
+ elif weight_type == "bias_hh_l1":
+ hf_pointer.bias_hh_l1.data = value
+ else:
+ hf_pointer.data = value
+
+ logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
+
+
+def should_ignore(name, ignore_keys):
+ for key in ignore_keys:
+ if key.endswith(".*"):
+ if name.startswith(key[:-1]):
+ return True
+ elif ".*." in key:
+ prefix, suffix = key.split(".*.")
+ if prefix in name and suffix in name:
+ return True
+ elif key in name:
+ return True
+ return False
+
+
+def recursively_load_weights(orig_dict, hf_model, model_name):
+ unused_weights = []
+
+ if model_name in ["encodec_24khz", "encodec_32khz"]:
+ MAPPING = MAPPING_24K
+ elif model_name == "encodec_48khz":
+ MAPPING = MAPPING_48K
+ else:
+ raise ValueError(f"Unsupported model: {model_name}")
+
+ for name, value in orig_dict.items():
+ if should_ignore(name, IGNORE_KEYS):
+ logger.info(f"{name} was ignored")
+ continue
+
+ is_used = False
+ for key, mapped_key in MAPPING.items():
+ if "*" in key:
+ prefix, suffix = key.split(".*.")
+ if prefix in name and suffix in name:
+ key = suffix
+
+ if key in name:
+ # HACK otherwise .embed gets initialized with .embed_avg too
+ if key.endswith("embed") and name.endswith("embed_avg"):
+ continue
+
+ is_used = True
+ if "*" in mapped_key:
+ layer_index = name.split(key)[0].split(".")[-2]
+ mapped_key = mapped_key.replace("*", layer_index)
+ if "weight_g" in name:
+ weight_type = "weight_g"
+ elif "weight_v" in name:
+ weight_type = "weight_v"
+ elif "weight_ih_l0" in name:
+ weight_type = "weight_ih_l0"
+ elif "weight_hh_l0" in name:
+ weight_type = "weight_hh_l0"
+ elif "bias_ih_l0" in name:
+ weight_type = "bias_ih_l0"
+ elif "bias_hh_l0" in name:
+ weight_type = "bias_hh_l0"
+ elif "weight_ih_l1" in name:
+ weight_type = "weight_ih_l1"
+ elif "weight_hh_l1" in name:
+ weight_type = "weight_hh_l1"
+ elif "bias_ih_l1" in name:
+ weight_type = "bias_ih_l1"
+ elif "bias_hh_l1" in name:
+ weight_type = "bias_hh_l1"
+ elif "bias" in name:
+ weight_type = "bias"
+ elif "weight" in name:
+ weight_type = "weight"
+ elif "running_mean" in name:
+ weight_type = "running_mean"
+ elif "running_var" in name:
+ weight_type = "running_var"
+ elif "num_batches_tracked" in name:
+ weight_type = "num_batches_tracked"
+ else:
+ weight_type = None
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
+ continue
+ if not is_used:
+ unused_weights.append(name)
+
+ logger.warning(f"Unused weights: {unused_weights}")
+
+
+@torch.no_grad()
+def convert_checkpoint(
+ model_name,
+ checkpoint_path,
+ pytorch_dump_folder_path,
+ config_path=None,
+ repo_id=None,
+):
+ """
+ Copy/paste/tweak model's weights to transformers design.
+ """
+ if config_path is not None:
+ config = EncodecConfig.from_pretrained(config_path)
+ else:
+ config = EncodecConfig()
+
+ if model_name == "encodec_24khz":
+ pass # config is already correct
+ elif model_name == "encodec_32khz":
+ config.upsampling_ratios = [8, 5, 4, 4]
+ config.target_bandwidths = [2.2]
+ config.num_filters = 64
+ config.sampling_rate = 32_000
+ config.codebook_size = 2048
+ config.use_causal_conv = False
+ config.normalize = False
+ config.use_conv_shortcut = False
+ elif model_name == "encodec_48khz":
+ config.upsampling_ratios = [8, 5, 4, 2]
+ config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
+ config.sampling_rate = 48_000
+ config.audio_channels = 2
+ config.use_causal_conv = False
+ config.norm_type = "time_group_norm"
+ config.normalize = True
+ config.chunk_length_s = 1.0
+ config.overlap = 0.01
+ else:
+ raise ValueError(f"Unknown model name: {model_name}")
+
+ model = EncodecModel(config)
+
+ feature_extractor = EncodecFeatureExtractor(
+ feature_size=config.audio_channels,
+ sampling_rate=config.sampling_rate,
+ chunk_length_s=config.chunk_length_s,
+ overlap=config.overlap,
+ )
+ feature_extractor.save_pretrained(pytorch_dump_folder_path)
+
+ original_checkpoint = torch.load(checkpoint_path)
+ if "best_state" in original_checkpoint:
+ # we might have a training state saved, in which case discard the yaml results and just retain the weights
+ original_checkpoint = original_checkpoint["best_state"]
+ recursively_load_weights(original_checkpoint, model, model_name)
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ if repo_id:
+ print("Pushing to the hub...")
+ feature_extractor.push_to_hub(repo_id)
+ model.push_to_hub(repo_id)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--model",
+ default="encodec_24khz",
+ type=str,
+ help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
+ )
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+ parser.add_argument(
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+ convert_checkpoint(
+ args.model,
+ args.checkpoint_path,
+ args.pytorch_dump_folder_path,
+ args.config_path,
+ args.push_to_hub,
+ )
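+# A sketch of how this script might be invoked (all paths below are placeholders):
+#   python convert_encodec_checkpoint_to_pytorch.py \
+#       --model encodec_24khz \
+#       --checkpoint_path /path/to/encodec_24khz-d7cc33bc.th \
+#       --pytorch_dump_folder_path ./encodec_24khz_converted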
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/feature_extraction_encodec.py b/venv/lib/python3.10/site-packages/transformers/models/encodec/feature_extraction_encodec.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f7536a52e9f99deeb97ffc9ef8accbbbed664d2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encodec/feature_extraction_encodec.py
@@ -0,0 +1,206 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for EnCodec."""
+
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ...feature_extraction_utils import BatchFeature
+from ...utils import PaddingStrategy, TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class EncodecFeatureExtractor(SequenceFeatureExtractor):
+ r"""
+ Constructs an EnCodec feature extractor.
+
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
+
+ Instantiating a feature extractor with the defaults will yield a similar configuration to that of the
+ [facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
+
+ Args:
+ feature_size (`int`, *optional*, defaults to 1):
+ The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
+ sampling_rate (`int`, *optional*, defaults to 24000):
+ The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz).
+ padding_value (`float`, *optional*, defaults to 0.0):
+ The value that is used to fill the padding values.
+ chunk_length_s (`float`, *optional*):
+ If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
+ overlap (`float`, *optional*):
+ Defines the overlap between each chunk. It is used to compute the `chunk_stride` with the
+ formula `max(1, int((1.0 - self.overlap) * self.chunk_length))`.
+ """
+
+ model_input_names = ["input_values", "padding_mask"]
+
+ def __init__(
+ self,
+ feature_size: int = 1,
+ sampling_rate: int = 24000,
+ padding_value: float = 0.0,
+ chunk_length_s: Optional[float] = None,
+ overlap: Optional[float] = None,
+ **kwargs,
+ ):
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
+ self.chunk_length_s = chunk_length_s
+ self.overlap = overlap
+
+ # This is a property because you might want to change the chunk_length_s on the fly
+ @property
+ def chunk_length(self) -> Optional[int]:
+ if self.chunk_length_s is None:
+ return None
+ else:
+ return int(self.chunk_length_s * self.sampling_rate)
+
+ # This is a property because you might want to change the chunk_length_s on the fly
+ @property
+ def chunk_stride(self) -> Optional[int]:
+ if self.chunk_length_s is None or self.overlap is None:
+ return None
+ else:
+ return max(1, int((1.0 - self.overlap) * self.chunk_length))
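+ # A quick sketch of how these two properties interact, using purely hypothetical values: with
+ # sampling_rate=24_000, chunk_length_s=1.0 and overlap=0.5, `chunk_length` is
+ # int(1.0 * 24_000) = 24_000 samples and `chunk_stride` is max(1, int((1.0 - 0.5) * 24_000)) = 12_000,
+ # i.e. consecutive chunks start 12_000 samples apart and overlap by half.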
+
+ def __call__(
+ self,
+ raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+ padding: Optional[Union[bool, str, PaddingStrategy]] = None,
+ truncation: Optional[bool] = False,
+ max_length: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ sampling_rate: Optional[int] = None,
+ ) -> BatchFeature:
+ """
+ Main method to featurize and prepare for the model one or several sequence(s).
+
+ Args:
+ raw_audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
+ The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
+ values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
+ `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
+ (`feature_size = 2`).
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, *optional*, defaults to `False`):
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ sampling_rate (`int`, *optional*):
+ The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
+ `sampling_rate` at the forward call to prevent silent errors.
+ """
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
+ f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
+ f" {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ if padding and truncation:
+ raise ValueError("Both padding and truncation were set. Make sure you only set one.")
+ elif padding is None:
+ # by default let's pad the inputs
+ padding = True
+
+ is_batched = bool(
+ isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
+ )
+
+ if is_batched:
+ raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
+ elif not is_batched and not isinstance(raw_audio, np.ndarray):
+ raw_audio = np.asarray(raw_audio, dtype=np.float32)
+ elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
+ raw_audio = raw_audio.astype(np.float32)
+
+ # always return batch
+ if not is_batched:
+ raw_audio = [np.asarray(raw_audio).T]
+
+ # verify inputs are valid
+ for idx, example in enumerate(raw_audio):
+ if example.ndim > 2:
+ raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
+ if self.feature_size == 1 and example.ndim != 1:
+ raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
+ if self.feature_size == 2 and example.shape[-1] != 2:
+ raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
+
+ padded_inputs = None
+ input_values = BatchFeature({"input_values": raw_audio})
+ if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
+ if truncation:
+ max_length = min(array.shape[0] for array in raw_audio)
+ nb_step = int(np.floor(max_length / self.chunk_stride))
+ max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
+ elif padding:
+ max_length = max(array.shape[0] for array in raw_audio)
+ nb_step = int(np.ceil(max_length / self.chunk_stride))
+ max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
+ padding = "max_length"
+ else:
+ padded_inputs = input_values
+
+ # normal padding on batch
+ if padded_inputs is None:
+ padded_inputs = self.pad(
+ input_values,
+ max_length=max_length,
+ truncation=truncation,
+ padding=padding,
+ return_attention_mask=padding,
+ )
+ if padding:
+ padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
+
+ input_values = []
+ for example in padded_inputs.pop("input_values"):
+ if self.feature_size == 1:
+ example = example[..., None]
+ input_values.append(example.T)
+
+ padded_inputs["input_values"] = input_values
+ if return_tensors is not None:
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
+
+ return padded_inputs
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encodec/modeling_encodec.py b/venv/lib/python3.10/site-packages/transformers/models/encodec/modeling_encodec.py
new file mode 100644
index 0000000000000000000000000000000000000000..48498b741d18cab693df38fc35997b3aac8371e6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encodec/modeling_encodec.py
@@ -0,0 +1,810 @@
+# coding=utf-8
+# Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch EnCodec model."""
+
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_encodec import EncodecConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+# General docstring
+_CONFIG_FOR_DOC = "EncodecConfig"
+
+
+from ..deprecated._archive_maps import ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class EncodecOutput(ModelOutput):
+ """
+ Args:
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
+ Discrete code embeddings computed using `model.encode`.
+ audio_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Decoded audio values, obtained using the decoder part of Encodec.
+ """
+
+ audio_codes: torch.LongTensor = None
+ audio_values: torch.FloatTensor = None
+
+
+@dataclass
+class EncodecEncoderOutput(ModelOutput):
+ """
+ Args:
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
+ Discrete code embeddings computed using `model.encode`.
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
+ Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding.
+ """
+
+ audio_codes: torch.LongTensor = None
+ audio_scales: torch.FloatTensor = None
+
+
+@dataclass
+class EncodecDecoderOutput(ModelOutput):
+ """
+ Args:
+ audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
+ Decoded audio values, obtained using the decoder part of Encodec.
+ """
+
+ audio_values: torch.FloatTensor = None
+
+
+class EncodecConv1d(nn.Module):
+ """Conv1d with asymmetric or causal padding and normalization."""
+
+ def __init__(
+ self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1
+ ):
+ super().__init__()
+ self.causal = config.use_causal_conv
+ self.pad_mode = config.pad_mode
+ self.norm_type = config.norm_type
+
+ if self.norm_type not in ["weight_norm", "time_group_norm"]:
+ raise ValueError(
+ f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
+ )
+
+ # warn user on unusual setup between dilation and stride
+ if stride > 1 and dilation > 1:
+ logger.warning(
+ "EncodecConv1d has been initialized with stride > 1 and dilation > 1"
+ f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})."
+ )
+
+ self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation)
+ if self.norm_type == "weight_norm":
+ self.conv = nn.utils.weight_norm(self.conv)
+ elif self.norm_type == "time_group_norm":
+ self.norm = nn.GroupNorm(1, out_channels)
+
+ kernel_size = self.conv.kernel_size[0]
+ stride = torch.tensor(self.conv.stride[0], dtype=torch.int64)
+ dilation = self.conv.dilation[0]
+
+ # Effective kernel size with dilations.
+ kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64)
+
+ self.register_buffer("stride", stride, persistent=False)
+ self.register_buffer("kernel_size", kernel_size, persistent=False)
+ self.register_buffer("padding_total", torch.tensor(kernel_size - stride, dtype=torch.int64), persistent=False)
+
+ def _get_extra_padding_for_conv1d(
+ self,
+ hidden_states: torch.Tensor,
+ ) -> torch.Tensor:
+ """See `pad_for_conv1d`."""
+ length = hidden_states.shape[-1]
+ n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1
+ n_frames = torch.ceil(n_frames).to(torch.int64) - 1
+ ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total
+
+ return ideal_length - length
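+ # Illustrative numbers: with an effective kernel_size of 4, stride 2 and thus padding_total = 2,
+ # an input of length 11 gives n_frames = ceil((11 - 4 + 2) / 2 + 1) - 1 = 5 and
+ # ideal_length = 5 * 2 + 4 - 2 = 12, so one extra sample of padding is requested to make the
+ # last window complete.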
+
+ @staticmethod
+ def _pad1d(hidden_states: torch.Tensor, paddings: Tuple[int, int], mode: str = "zero", value: float = 0.0):
+ """Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input.
+ If this is the case, we insert extra 0 padding to the right before the reflection happens.
+ """
+ length = hidden_states.shape[-1]
+ padding_left, padding_right = paddings
+ if not mode == "reflect":
+ return nn.functional.pad(hidden_states, paddings, mode, value)
+
+ max_pad = max(padding_left, padding_right)
+ extra_pad = 0
+ if length <= max_pad:
+ extra_pad = max_pad - length + 1
+ hidden_states = nn.functional.pad(hidden_states, (0, extra_pad))
+ padded = nn.functional.pad(hidden_states, paddings, mode, value)
+ end = padded.shape[-1] - extra_pad
+ return padded[..., :end]
+
+ def forward(self, hidden_states):
+ extra_padding = self._get_extra_padding_for_conv1d(hidden_states)
+
+ if self.causal:
+ # Left padding for causal
+ hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode)
+ else:
+ # Asymmetric padding required for odd strides
+ padding_right = self.padding_total // 2
+ padding_left = self.padding_total - padding_right
+ hidden_states = self._pad1d(
+ hidden_states, (padding_left, padding_right + extra_padding), mode=self.pad_mode
+ )
+
+ hidden_states = self.conv(hidden_states)
+
+ if self.norm_type == "time_group_norm":
+ hidden_states = self.norm(hidden_states)
+
+ return hidden_states
+
+
+class EncodecConvTranspose1d(nn.Module):
+ """ConvTranspose1d with asymmetric or causal padding and normalization."""
+
+ def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1):
+ super().__init__()
+ self.causal = config.use_causal_conv
+ self.trim_right_ratio = config.trim_right_ratio
+ self.norm_type = config.norm_type
+ if self.norm_type not in ["weight_norm", "time_group_norm"]:
+ raise ValueError(
+ f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
+ )
+
+ self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride)
+ if config.norm_type == "weight_norm":
+ self.conv = nn.utils.weight_norm(self.conv)
+ elif config.norm_type == "time_group_norm":
+ self.norm = nn.GroupNorm(1, out_channels)
+
+ if not (self.causal or self.trim_right_ratio == 1.0):
+ raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions")
+
+ def forward(self, hidden_states):
+ kernel_size = self.conv.kernel_size[0]
+ stride = self.conv.stride[0]
+ padding_total = kernel_size - stride
+
+ hidden_states = self.conv(hidden_states)
+
+ if self.norm_type == "time_group_norm":
+ hidden_states = self.norm(hidden_states)
+
+ # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
+ # removed at the very end, when keeping only the right length for the output,
+ # as removing it here would require also passing the length at the matching layer
+ # in the encoder.
+ if self.causal:
+ # Trim the padding on the right according to the specified ratio
+ # if trim_right_ratio = 1.0, trim everything from right
+ padding_right = math.ceil(padding_total * self.trim_right_ratio)
+ else:
+ # Asymmetric padding required for odd strides
+ padding_right = padding_total // 2
+
+ padding_left = padding_total - padding_right
+
+ # unpad
+ end = hidden_states.shape[-1] - padding_right
+ hidden_states = hidden_states[..., padding_left:end]
+ return hidden_states
+
+
+class EncodecLSTM(nn.Module):
+ """
+ LSTM without worrying about the hidden state, nor the layout of the data. Expects input as convolutional layout.
+ """
+
+ def __init__(self, config, dimension):
+ super().__init__()
+ self.lstm = nn.LSTM(dimension, dimension, config.num_lstm_layers)
+
+ def forward(self, hidden_states):
+ hidden_states = hidden_states.permute(2, 0, 1)
+ hidden_states = self.lstm(hidden_states)[0] + hidden_states
+ hidden_states = hidden_states.permute(1, 2, 0)
+ return hidden_states
+
+
+class EncodecResnetBlock(nn.Module):
+ """
+ Residual block from SEANet model as used by EnCodec.
+ """
+
+ def __init__(self, config: EncodecConfig, dim: int, dilations: List[int]):
+ super().__init__()
+ kernel_sizes = (config.residual_kernel_size, 1)
+ if len(kernel_sizes) != len(dilations):
+ raise ValueError("Number of kernel sizes should match number of dilations")
+
+ hidden = dim // config.compress
+ block = []
+ for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
+ in_chs = dim if i == 0 else hidden
+ out_chs = dim if i == len(kernel_sizes) - 1 else hidden
+ block += [nn.ELU()]
+ block += [EncodecConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)]
+ self.block = nn.ModuleList(block)
+
+ if config.use_conv_shortcut:
+ self.shortcut = EncodecConv1d(config, dim, dim, kernel_size=1)
+ else:
+ self.shortcut = nn.Identity()
+
+ def forward(self, hidden_states):
+ residual = hidden_states
+ for layer in self.block:
+ hidden_states = layer(hidden_states)
+
+ return self.shortcut(residual) + hidden_states
+
+
+class EncodecEncoder(nn.Module):
+ """SEANet encoder as used by EnCodec."""
+
+ def __init__(self, config: EncodecConfig):
+ super().__init__()
+ model = [EncodecConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)]
+ scaling = 1
+
+ # Downsample to raw audio scale
+ for ratio in reversed(config.upsampling_ratios):
+ current_scale = scaling * config.num_filters
+ # Add residual layers
+ for j in range(config.num_residual_layers):
+ model += [EncodecResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])]
+ # Add downsampling layers
+ model += [nn.ELU()]
+ model += [EncodecConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)]
+ scaling *= 2
+
+ model += [EncodecLSTM(config, scaling * config.num_filters)]
+ model += [nn.ELU()]
+ model += [EncodecConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)]
+
+ self.layers = nn.ModuleList(model)
+
+ def forward(self, hidden_states):
+ for layer in self.layers:
+ hidden_states = layer(hidden_states)
+ return hidden_states
+
+
+class EncodecDecoder(nn.Module):
+ """SEANet decoder as used by EnCodec."""
+
+ def __init__(self, config: EncodecConfig):
+ super().__init__()
+ scaling = int(2 ** len(config.upsampling_ratios))
+ model = [EncodecConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)]
+
+ model += [EncodecLSTM(config, scaling * config.num_filters)]
+
+ # Upsample to raw audio scale
+ for ratio in config.upsampling_ratios:
+ current_scale = scaling * config.num_filters
+ # Add upsampling layers
+ model += [nn.ELU()]
+ model += [
+ EncodecConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio)
+ ]
+ # Add residual layers
+ for j in range(config.num_residual_layers):
+ model += [EncodecResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))]
+ scaling //= 2
+
+ # Add final layers
+ model += [nn.ELU()]
+ model += [EncodecConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)]
+ self.layers = nn.ModuleList(model)
+
+ def forward(self, hidden_states):
+ for layer in self.layers:
+ hidden_states = layer(hidden_states)
+ return hidden_states
+
+
+class EncodecEuclideanCodebook(nn.Module):
+ """Codebook with Euclidean distance."""
+
+ def __init__(self, config: EncodecConfig):
+ super().__init__()
+ embed = torch.zeros(config.codebook_size, config.codebook_dim)
+
+ self.codebook_size = config.codebook_size
+
+ self.register_buffer("inited", torch.Tensor([True]))
+ self.register_buffer("cluster_size", torch.zeros(config.codebook_size))
+ self.register_buffer("embed", embed)
+ self.register_buffer("embed_avg", embed.clone())
+
+ def quantize(self, hidden_states):
+ embed = self.embed.t()
+ scaled_states = hidden_states.pow(2).sum(1, keepdim=True)
+ dist = -(scaled_states - 2 * hidden_states @ embed + embed.pow(2).sum(0, keepdim=True))
+ embed_ind = dist.max(dim=-1).indices
+ return embed_ind
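+ # `dist` above is the negative squared Euclidean distance between each input vector x and every
+ # codebook entry e, expanded as -(||x||^2 - 2 x.e + ||e||^2), so taking the argmax over the
+ # codebook dimension selects the nearest codebook entry for each input vector.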
+
+ def encode(self, hidden_states):
+ shape = hidden_states.shape
+ # pre-process
+ hidden_states = hidden_states.reshape((-1, shape[-1]))
+ # quantize
+ embed_ind = self.quantize(hidden_states)
+ # post-process
+ embed_ind = embed_ind.view(*shape[:-1])
+ return embed_ind
+
+ def decode(self, embed_ind):
+ quantize = nn.functional.embedding(embed_ind, self.embed)
+ return quantize
+
+
+class EncodecVectorQuantization(nn.Module):
+ """
+ Vector quantization implementation. Currently supports only euclidean distance.
+ """
+
+ def __init__(self, config: EncodecConfig):
+ super().__init__()
+ self.codebook = EncodecEuclideanCodebook(config)
+
+ def encode(self, hidden_states):
+ hidden_states = hidden_states.permute(0, 2, 1)
+ embed_in = self.codebook.encode(hidden_states)
+ return embed_in
+
+ def decode(self, embed_ind):
+ quantize = self.codebook.decode(embed_ind)
+ quantize = quantize.permute(0, 2, 1)
+ return quantize
+
+
+class EncodecResidualVectorQuantizer(nn.Module):
+ """Residual Vector Quantizer."""
+
+ def __init__(self, config: EncodecConfig):
+ super().__init__()
+ self.codebook_size = config.codebook_size
+ self.frame_rate = config.frame_rate
+ self.num_quantizers = config.num_quantizers
+ self.layers = nn.ModuleList([EncodecVectorQuantization(config) for _ in range(config.num_quantizers)])
+
+ def get_num_quantizers_for_bandwidth(self, bandwidth: Optional[float] = None) -> int:
+ """Return num_quantizers based on specified target bandwidth."""
+ bw_per_q = math.log2(self.codebook_size) * self.frame_rate
+ num_quantizers = self.num_quantizers
+ if bandwidth is not None and bandwidth > 0.0:
+ num_quantizers = int(max(1, math.floor(bandwidth * 1000 / bw_per_q)))
+ return num_quantizers
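+ # Sketch with the default 24 kHz configuration (codebook_size=1024, frame_rate=75):
+ # bw_per_q = log2(1024) * 75 = 750 bits/s per codebook, so a 6.0 kbps target selects
+ # floor(6000 / 750) = 8 quantizers and the maximum 24.0 kbps target selects all 32.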
+
+ def encode(self, embeddings: torch.Tensor, bandwidth: Optional[float] = None) -> torch.Tensor:
+ """
+ Encode a given input tensor with the specified frame rate at the given bandwidth. The RVQ encode method sets
+ the appropriate number of quantizers to use and returns indices for each quantizer.
+ """
+ num_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth)
+ residual = embeddings
+ all_indices = []
+ for layer in self.layers[:num_quantizers]:
+ indices = layer.encode(residual)
+ quantized = layer.decode(indices)
+ residual = residual - quantized
+ all_indices.append(indices)
+ out_indices = torch.stack(all_indices)
+ return out_indices
+
+ def decode(self, codes: torch.Tensor) -> torch.Tensor:
+ """Decode the given codes to the quantized representation."""
+ quantized_out = torch.tensor(0.0, device=codes.device)
+ for i, indices in enumerate(codes):
+ layer = self.layers[i]
+ quantized = layer.decode(indices)
+ quantized_out = quantized_out + quantized
+ return quantized_out
+
+
+class EncodecPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = EncodecConfig
+ base_model_prefix = "encodec"
+ main_input_name = "input_values"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Conv1d):
+ nn.init.kaiming_normal_(module.weight)
+ if module.bias is not None:
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
+ nn.init.uniform_(module.bias, a=-k, b=k)
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LSTM):
+ for name, param in module.named_parameters():
+ if "weight" in name:
+ nn.init.xavier_uniform_(param)
+ elif "bias" in name:
+ nn.init.constant_(param, 0.0)
+
+
+ENCODEC_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`EncodecConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+ENCODEC_INPUTS_DOCSTRING = r"""
+ Args:
+ input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
+ Raw audio input converted to float and padded to the appropriate length in order to be encoded
+ using chunks of length `config.chunk_length` and a stride of `config.chunk_stride`.
+ padding_mask (`torch.BoolTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
+ Mask to avoid computing scaling factors on padding token indices (can we avoid computing conv on these?).
+ Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+
+
+ `padding_mask` should always be passed, unless the input was truncated or not padded. This is because in
+ order to process tensors effectively, the input audio should be padded so that `input_length % stride =
+ step` with `step = chunk_length - stride`. This ensures that all chunks are of the same shape.
+
+
+
+ bandwidth (`float`, *optional*):
+ The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
+ bandwidth. The bandwidth is expressed in kbps, i.e. a target of 6 kbps corresponds to
+ `bandwidth == 6.0`.
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
+ Discrete code embeddings computed using `model.encode`.
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
+ Scaling factor for each `audio_codes` input.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The EnCodec neural audio codec model.",
+ ENCODEC_START_DOCSTRING,
+)
+class EncodecModel(EncodecPreTrainedModel):
+ def __init__(self, config: EncodecConfig):
+ super().__init__(config)
+ self.config = config
+
+ self.encoder = EncodecEncoder(config)
+ self.decoder = EncodecDecoder(config)
+
+ self.quantizer = EncodecResidualVectorQuantizer(config)
+
+ self.bits_per_codebook = int(math.log2(self.config.codebook_size))
+ if 2**self.bits_per_codebook != self.config.codebook_size:
+ raise ValueError("The codebook_size must be a power of 2.")
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def _encode_frame(
+ self, input_values: torch.Tensor, bandwidth: float, padding_mask: int
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+ """
+ Encodes the given input using the underlying VQVAE. If `config.normalize` is set to `True` the input is first
+ normalized. The padding mask is required to compute the correct scale.
+ """
+ length = input_values.shape[-1]
+ duration = length / self.config.sampling_rate
+
+ if self.config.chunk_length_s is not None and duration > 1e-5 + self.config.chunk_length_s:
+ raise RuntimeError(f"Duration of frame ({duration}) is longer than chunk {self.config.chunk_length_s}")
+
+ scale = None
+ if self.config.normalize:
+ # if the padding is non zero
+ input_values = input_values * padding_mask
+ mono = torch.sum(input_values, 1, keepdim=True) / input_values.shape[1]
+ scale = mono.pow(2).mean(dim=-1, keepdim=True).sqrt() + 1e-8
+ input_values = input_values / scale
+
+ embeddings = self.encoder(input_values)
+ codes = self.quantizer.encode(embeddings, bandwidth)
+ codes = codes.transpose(0, 1)
+ return codes, scale
+
+ def encode(
+ self,
+ input_values: torch.Tensor,
+ padding_mask: torch.Tensor = None,
+ bandwidth: Optional[float] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], EncodecEncoderOutput]:
+ """
+ Encodes the input audio waveform into discrete codes.
+
+ Args:
+ input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
+ Float values of the input audio waveform.
+ padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
+ Padding mask used to pad the `input_values`.
+ bandwidth (`float`, *optional*):
+ The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
+ bandwidth. The bandwidth is expressed in kbps, i.e. a target of 6 kbps corresponds to
+ `bandwidth == 6.0`.
+
+ Returns:
+ A list of frames containing the discrete encoded codes for the input audio waveform, along with rescaling
+ factors for each chunk when `normalize` is True. Each frame is a tuple `(codebook, scale)`, with
+ `codebook` of shape `[batch_size, num_codebooks, frames]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if bandwidth is None:
+ bandwidth = self.config.target_bandwidths[0]
+ if bandwidth not in self.config.target_bandwidths:
+ raise ValueError(
+ f"This model doesn't support the bandwidth {bandwidth}. "
+ f"Select one of {self.config.target_bandwidths}."
+ )
+
+ _, channels, input_length = input_values.shape
+
+ if channels < 1 or channels > 2:
+ raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}")
+
+ chunk_length = self.config.chunk_length
+ if chunk_length is None:
+ chunk_length = input_length
+ stride = input_length
+ else:
+ stride = self.config.chunk_stride
+
+ if padding_mask is None:
+ padding_mask = torch.ones_like(input_values).bool()
+
+ encoded_frames = []
+ scales = []
+
+ step = chunk_length - stride
+ if (input_length % stride) - step != 0:
+ raise ValueError(
+ "The input length is not properly padded for batched chunked decoding. Make sure to pad the input correctly."
+ )
+
+ for offset in range(0, input_length - step, stride):
+ mask = padding_mask[..., offset : offset + chunk_length].bool()
+ frame = input_values[:, :, offset : offset + chunk_length]
+ encoded_frame, scale = self._encode_frame(frame, bandwidth, mask)
+ encoded_frames.append(encoded_frame)
+ scales.append(scale)
+
+ encoded_frames = torch.stack(encoded_frames)
+
+ if not return_dict:
+ return (encoded_frames, scales)
+
+ return EncodecEncoderOutput(encoded_frames, scales)
+
+ @staticmethod
+ def _linear_overlap_add(frames: List[torch.Tensor], stride: int):
+ # Generic overlap add, with linear fade-in/fade-out, supporting complex scenarios,
+ # e.g., more than 2 frames per position.
+ # The core idea is to use a weight function that is a triangle,
+ # with a maximum value at the middle of the chunk.
+ # We use this weighting when summing the frames, and divide by the sum of weights
+ # for each position at the end. Thus:
+ # - if a frame is the only one to cover a position, the weighting is a no-op.
+ # - if 2 frames cover a position:
+ # ... ...
+ # / \/ \
+ # / /\ \
+ # S T , i.e. S is the offset where the second frame starts and T the end of the first frame.
+ # Then the weight function for each one is: (t - S), (T - t), with `t` a given offset.
+ # After the final normalization, the weight of the second frame at position `t` is
+ # (t - S) / (t - S + (T - t)) = (t - S) / (T - S), which is exactly what we want.
+ #
+ # - if more than 2 frames overlap at a given point, we hope that by induction
+ # something sensible happens.
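+ # As a tiny numeric example, with frame_length = 3 the weight vector computed below is
+ # [0.25, 0.5, 0.25]: a triangle peaking at the centre of the frame.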
+ if len(frames) == 0:
+ raise ValueError("`frames` cannot be an empty list.")
+
+ device = frames[0].device
+ dtype = frames[0].dtype
+ shape = frames[0].shape[:-1]
+ total_size = stride * (len(frames) - 1) + frames[-1].shape[-1]
+
+ frame_length = frames[0].shape[-1]
+ time_vec = torch.linspace(0, 1, frame_length + 2, device=device, dtype=dtype)[1:-1]
+ weight = 0.5 - (time_vec - 0.5).abs()
+
+ sum_weight = torch.zeros(total_size, device=device, dtype=dtype)
+ out = torch.zeros(*shape, total_size, device=device, dtype=dtype)
+ offset: int = 0
+
+ for frame in frames:
+ frame_length = frame.shape[-1]
+ out[..., offset : offset + frame_length] += weight[:frame_length] * frame
+ sum_weight[offset : offset + frame_length] += weight[:frame_length]
+ offset += stride
+
+ if sum_weight.min() == 0:
+ raise ValueError(f"`sum_weight` minimum element must be bigger than zero: {sum_weight}`")
+
+ return out / sum_weight
+
+ def _decode_frame(self, codes: torch.Tensor, scale: Optional[torch.Tensor] = None) -> torch.Tensor:
+ codes = codes.transpose(0, 1)
+ embeddings = self.quantizer.decode(codes)
+ outputs = self.decoder(embeddings)
+ if scale is not None:
+ outputs = outputs * scale.view(-1, 1, 1)
+ return outputs
+
+ def decode(
+ self,
+ audio_codes: torch.Tensor,
+ audio_scales: torch.Tensor,
+ padding_mask: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]:
+ """
+ Decodes the given frames into an output audio waveform.
+
+ Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be
+ trimmed.
+
+ Args:
+ audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
+ Discrete code embeddings computed using `model.encode`.
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
+ Scaling factor for each `audio_codes` input.
+ padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
+ Padding mask used to pad the `input_values`.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ """
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ chunk_length = self.config.chunk_length
+ if chunk_length is None:
+ if len(audio_codes) != 1:
+ raise ValueError(f"Expected one frame, got {len(audio_codes)}")
+ audio_values = self._decode_frame(audio_codes[0], audio_scales[0])
+ else:
+ decoded_frames = []
+
+ for frame, scale in zip(audio_codes, audio_scales):
+ frames = self._decode_frame(frame, scale)
+ decoded_frames.append(frames)
+
+ audio_values = self._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1)
+
+ # truncate based on padding mask
+ if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]:
+ audio_values = audio_values[..., : padding_mask.shape[-1]]
+
+ if not return_dict:
+ return (audio_values,)
+ return EncodecDecoderOutput(audio_values)
+
+ @add_start_docstrings_to_model_forward(ENCODEC_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=EncodecOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_values: torch.Tensor,
+ padding_mask: Optional[torch.Tensor] = None,
+ bandwidth: Optional[float] = None,
+ audio_codes: Optional[torch.Tensor] = None,
+ audio_scales: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoProcessor, EncodecModel
+
+ >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
+ >>> audio_sample = dataset["train"]["audio"][0]["array"]
+
+ >>> model_id = "facebook/encodec_24khz"
+ >>> model = EncodecModel.from_pretrained(model_id)
+ >>> processor = AutoProcessor.from_pretrained(model_id)
+
+ >>> inputs = processor(raw_audio=audio_sample, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> audio_codes = outputs.audio_codes
+ >>> audio_values = outputs.audio_values
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if padding_mask is None:
+ padding_mask = torch.ones_like(input_values).bool()
+
+ if audio_codes is not None and audio_scales is None:
+ raise ValueError("You specified `audio_codes` but did not specify the `audio_scales`")
+
+ if audio_scales is not None and audio_codes is None:
+ raise ValueError("You specified `audio_scales` but did not specify the `audio_codes`")
+
+ if audio_scales is None and audio_codes is None:
+ audio_codes, audio_scales = self.encode(input_values, padding_mask, bandwidth, False)
+
+ audio_values = self.decode(audio_codes, audio_scales, padding_mask, return_dict=return_dict)[0]
+ if not return_dict:
+ return (audio_codes, audio_values)
+
+ return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/glpn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..94788dcb85e76faa2f312df8d13f5577c21a88d1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/glpn/__init__.py
@@ -0,0 +1,75 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
+ _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_glpn"] = [
+ "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "GLPNForDepthEstimation",
+ "GLPNLayer",
+ "GLPNModel",
+ "GLPNPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_glpn import GLPNFeatureExtractor
+ from .image_processing_glpn import GLPNImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_glpn import (
+ GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
+ GLPNForDepthEstimation,
+ GLPNLayer,
+ GLPNModel,
+ GLPNPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d0c6ae1c7c15d526f7dc3ae483780dc609d447c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..233d06a9b292b57fe12d2d6e31bee6babdd43fb0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..16129252440a57d42c9e7904d45fbbe8a0e1c117
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a0869a33f1c700d51dc7be52ae3cc01ec49477c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..659c0f8957db0df66a822f8c5acf4e049dda1806
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py b/venv/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3341192169aa09597f6c51ad07ff5305ef1a4e6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py
@@ -0,0 +1,135 @@
+# coding=utf-8
+# Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" GLPN model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class GLPNConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate a GLPN
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the GLPN
+ [vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ num_encoder_blocks (`int`, *optional*, defaults to 4):
+ The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
+ depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
+ The number of layers in each encoder block.
+ sr_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
+ Sequence reduction ratios in each encoder block.
+ hidden_sizes (`List[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
+ Dimension of each of the encoder blocks.
+ patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
+ Patch size before each encoder block.
+ strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
+ Stride before each encoder block.
+ num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
+ mlp_ratios (`List[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
+ encoder blocks.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) used in the encoder. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+            The dropout probability for all fully connected layers in the embeddings and encoder.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
+ The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ decoder_hidden_size (`int`, *optional*, defaults to 64):
+ The dimension of the decoder.
+ max_depth (`int`, *optional*, defaults to 10):
+            The maximum value of the predicted depth map; the head scales its sigmoid output by this value.
+ head_in_index (`int`, *optional*, defaults to -1):
+ The index of the features to use in the head.
+
+ Example:
+
+ ```python
+ >>> from transformers import GLPNModel, GLPNConfig
+
+ >>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
+ >>> configuration = GLPNConfig()
+
+ >>> # Initializing a model from the vinvino02/glpn-kitti style configuration
+ >>> model = GLPNModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "glpn"
+
+ def __init__(
+ self,
+ num_channels=3,
+ num_encoder_blocks=4,
+ depths=[2, 2, 2, 2],
+ sr_ratios=[8, 4, 2, 1],
+ hidden_sizes=[32, 64, 160, 256],
+ patch_sizes=[7, 3, 3, 3],
+ strides=[4, 2, 2, 2],
+ num_attention_heads=[1, 2, 5, 8],
+ mlp_ratios=[4, 4, 4, 4],
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ drop_path_rate=0.1,
+ layer_norm_eps=1e-6,
+ decoder_hidden_size=64,
+ max_depth=10,
+ head_in_index=-1,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.num_channels = num_channels
+ self.num_encoder_blocks = num_encoder_blocks
+ self.depths = depths
+ self.sr_ratios = sr_ratios
+ self.hidden_sizes = hidden_sizes
+ self.patch_sizes = patch_sizes
+ self.strides = strides
+ self.mlp_ratios = mlp_ratios
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.drop_path_rate = drop_path_rate
+ self.layer_norm_eps = layer_norm_eps
+ self.decoder_hidden_size = decoder_hidden_size
+ self.max_depth = max_depth
+ self.head_in_index = head_in_index
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f0183783ec812f69766d9220efb58652a21cb87
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py
@@ -0,0 +1,219 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert GLPN checkpoints."""
+
+
+import argparse
+from collections import OrderedDict
+from pathlib import Path
+
+import requests
+import torch
+from PIL import Image
+
+from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def rename_keys(state_dict):
+ new_state_dict = OrderedDict()
+ for key, value in state_dict.items():
+ if key.startswith("module.encoder"):
+ key = key.replace("module.encoder", "glpn.encoder")
+ if key.startswith("module.decoder"):
+ key = key.replace("module.decoder", "decoder.stages")
+ if "patch_embed" in key:
+ # replace for example patch_embed1 by patch_embeddings.0
+ idx = key[key.find("patch_embed") + len("patch_embed")]
+ key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
+ if "norm" in key:
+ key = key.replace("norm", "layer_norm")
+ if "glpn.encoder.layer_norm" in key:
+ # replace for example layer_norm1 by layer_norm.0
+ idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
+ key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
+ if "layer_norm1" in key:
+ key = key.replace("layer_norm1", "layer_norm_1")
+ if "layer_norm2" in key:
+ key = key.replace("layer_norm2", "layer_norm_2")
+ if "block" in key:
+ # replace for example block1 by block.0
+ idx = key[key.find("block") + len("block")]
+ key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
+ if "attn.q" in key:
+ key = key.replace("attn.q", "attention.self.query")
+ if "attn.proj" in key:
+ key = key.replace("attn.proj", "attention.output.dense")
+ if "attn" in key:
+ key = key.replace("attn", "attention.self")
+ if "fc1" in key:
+ key = key.replace("fc1", "dense1")
+ if "fc2" in key:
+ key = key.replace("fc2", "dense2")
+ if "linear_pred" in key:
+ key = key.replace("linear_pred", "classifier")
+ if "linear_fuse" in key:
+ key = key.replace("linear_fuse.conv", "linear_fuse")
+ key = key.replace("linear_fuse.bn", "batch_norm")
+ if "linear_c" in key:
+ # replace for example linear_c4 by linear_c.3
+ idx = key[key.find("linear_c") + len("linear_c")]
+ key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
+ if "bot_conv" in key:
+ key = key.replace("bot_conv", "0.convolution")
+ if "skip_conv1" in key:
+ key = key.replace("skip_conv1", "1.convolution")
+ if "skip_conv2" in key:
+ key = key.replace("skip_conv2", "2.convolution")
+ if "fusion1" in key:
+ key = key.replace("fusion1", "1.fusion")
+ if "fusion2" in key:
+ key = key.replace("fusion2", "2.fusion")
+ if "fusion3" in key:
+ key = key.replace("fusion3", "3.fusion")
+ if "fusion" in key and "conv" in key:
+ key = key.replace("conv", "convolutional_layer")
+ if key.startswith("module.last_layer_depth"):
+ key = key.replace("module.last_layer_depth", "head.head")
+ new_state_dict[key] = value
+
+ return new_state_dict
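+
+
+# Illustrative (hypothetical) key mapping produced by the rules above:
+#   "module.encoder.block1.0.attn.q.weight" -> "glpn.encoder.block.0.0.attention.self.query.weight"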
+
+
+def read_in_k_v(state_dict, config):
+ # for each of the encoder blocks:
+ for i in range(config.num_encoder_blocks):
+ for j in range(config.depths[i]):
+ # read in weights + bias of keys and values (which is a single matrix in the original implementation)
+ kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
+ kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
+ # next, add keys and values (in that order) to the state dict
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
+ : config.hidden_sizes[i], :
+ ]
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
+ config.hidden_sizes[i] :, :
+ ]
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
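+            # e.g. if hidden_sizes[i] == 64, kv_weight has shape (128, 64): rows [:64] become the key
+            # projection and rows [64:] the value projection (these shapes are illustrative)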
+
+
+# We will verify our results on a COCO image
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image = Image.open(requests.get(url, stream=True).raw)
+
+ return image
+
+
+@torch.no_grad()
+def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
+ """
+ Copy/paste/tweak model's weights to our GLPN structure.
+ """
+
+ # load GLPN configuration (Segformer-B4 size)
+ config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
+
+ # load image processor (only resize + rescale)
+ image_processor = GLPNImageProcessor()
+
+ # prepare image
+ image = prepare_img()
+ pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
+
+ logger.info("Converting model...")
+
+ # load original state dict
+ state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
+
+ # rename keys
+ state_dict = rename_keys(state_dict)
+
+ # key and value matrices need special treatment
+ read_in_k_v(state_dict, config)
+
+ # create HuggingFace model and load state dict
+ model = GLPNForDepthEstimation(config)
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ # forward pass
+ outputs = model(pixel_values)
+ predicted_depth = outputs.predicted_depth
+
+ # verify output
+ if model_name is not None:
+ if "nyu" in model_name:
+ expected_slice = torch.tensor(
+ [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
+ )
+ elif "kitti" in model_name:
+ expected_slice = torch.tensor(
+ [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
+ )
+ else:
+ raise ValueError(f"Unknown model name: {model_name}")
+
+ expected_shape = torch.Size([1, 480, 640])
+
+ assert predicted_depth.shape == expected_shape
+ assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
+ print("Looks ok!")
+
+ # finally, push to hub if required
+ if push_to_hub:
+ logger.info("Pushing model and image processor to the hub...")
+ model.push_to_hub(
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+ organization="nielsr",
+ commit_message="Add model",
+ use_temp_dir=True,
+ )
+ image_processor.push_to_hub(
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+ organization="nielsr",
+ commit_message="Add image processor",
+ use_temp_dir=True,
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--checkpoint_path",
+ default=None,
+ type=str,
+ help="Path to the original PyTorch checkpoint (.pth file).",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
+ )
+ parser.add_argument(
+ "--model_name",
+ default="glpn-kitti",
+ type=str,
+ help="Name of the model in case you're pushing to the hub.",
+ )
+ args = parser.parse_args()
+ convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py b/venv/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..314268225d2af41f3cc6af55af4e21aebe087b60
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for GLPN."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_glpn import GLPNImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class GLPNFeatureExtractor(GLPNImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+ " use GLPNImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py b/venv/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..7577b4eeb3d0c20b9d023bc488f8bf3c6bb39fdd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py
@@ -0,0 +1,233 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for GLPN."""
+
+from typing import List, Optional, Union
+
+import numpy as np
+import PIL.Image
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature
+from ...image_transforms import resize, to_channel_dimension_format
+from ...image_utils import (
+ ChannelDimension,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class GLPNImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a GLPN image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions, rounding them down to the closest multiple of
+ `size_divisor`. Can be overridden by `do_resize` in `preprocess`.
+ size_divisor (`int`, *optional*, defaults to 32):
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
+ multiple of `size_divisor`. Can be overridden by `size_divisor` in `preprocess`.
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Can be
+ overridden by `do_rescale` in `preprocess`.
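+
+    Example (a minimal sketch; the dummy image and its size are illustrative):
+
+    ```python
+    >>> import numpy as np
+
+    >>> from transformers import GLPNImageProcessor
+
+    >>> image_processor = GLPNImageProcessor()
+    >>> image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
+    >>> inputs = image_processor(images=image, return_tensors="pt")
+    >>> list(inputs.pixel_values.shape)
+    [1, 3, 480, 640]
+    ```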
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size_divisor: int = 32,
+ resample=PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ **kwargs,
+ ) -> None:
+ self.do_resize = do_resize
+ self.do_rescale = do_rescale
+ self.size_divisor = size_divisor
+ self.resample = resample
+ super().__init__(**kwargs)
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size_divisor",
+ "resample",
+ "do_rescale",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size_divisor: int,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize the image, rounding the (height, width) dimensions down to the closest multiple of size_divisor.
+
+ If the image is of dimension (3, 260, 170) and size_divisor is 32, the image will be resized to (3, 256, 160).
+
+ Args:
+ image (`np.ndarray`):
+ The image to resize.
+ size_divisor (`int`):
+ The image is resized so its height and width are rounded down to the closest multiple of
+ `size_divisor`.
+ resample:
+ `PIL.Image` resampling filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If `None`, the channel dimension format of the input
+ image is used. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not set, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ height, width = get_image_size(image, channel_dim=input_data_format)
+ # Rounds the height and width down to the closest multiple of size_divisor
+ new_h = height // size_divisor * size_divisor
+ new_w = width // size_divisor * size_divisor
+ image = resize(
+ image,
+ (new_h, new_w),
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+ return image
+
+ def preprocess(
+ self,
+ images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
+ do_resize: Optional[bool] = None,
+ size_divisor: Optional[int] = None,
+ resample=None,
+ do_rescale: Optional[bool] = None,
+ return_tensors: Optional[Union[TensorType, str]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess the given images.
+
+ Args:
+ images (`PIL.Image.Image` or `TensorType` or `List[np.ndarray]` or `List[TensorType]`):
+ Images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the input such that the (height, width) dimensions are a multiple of `size_divisor`.
+ size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the
+ closest multiple of `size_divisor`.
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `self.resample`):
+ `PIL.Image` resampling filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
+ an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - `None`: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ size_divisor = size_divisor if size_divisor is not None else self.size_divisor
+ resample = resample if resample is not None else self.resample
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+        # The rescale() call below always uses a fixed factor of 1/255, so there is no user-provided
+        # rescale_factor to validate here.
+ validate_preprocess_arguments(
+ do_resize=do_resize,
+            size=size_divisor,  # size_divisor stands in for size, since GLPN only rounds dimensions down to a multiple of it
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(img) for img in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [self.rescale(image, scale=1 / 255, input_data_format=input_data_format) for image in images]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py b/venv/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5d30b62720c9d72eaadb3d5125d55f6ac4664ab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py
@@ -0,0 +1,778 @@
+# coding=utf-8
+# Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch GLPN model."""
+
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_glpn import GLPNConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+# General docstring
+_CONFIG_FOR_DOC = "GLPNConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "vinvino02/glpn-kitti"
+_EXPECTED_OUTPUT_SHAPE = [1, 512, 15, 20]
+
+
+from ..deprecated._archive_maps import GLPN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
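+
+
+# Note: with drop_prob == 0.0 or in eval mode this is the identity; during training each sample is
+# zeroed with probability drop_prob and the survivors are rescaled by 1 / (1 - drop_prob).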
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerDropPath
+class GLPNDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerOverlapPatchEmbeddings
+class GLPNOverlapPatchEmbeddings(nn.Module):
+ """Construct the overlapping patch embeddings."""
+
+ def __init__(self, patch_size, stride, num_channels, hidden_size):
+ super().__init__()
+ self.proj = nn.Conv2d(
+ num_channels,
+ hidden_size,
+ kernel_size=patch_size,
+ stride=stride,
+ padding=patch_size // 2,
+ )
+
+ self.layer_norm = nn.LayerNorm(hidden_size)
+
+ def forward(self, pixel_values):
+ embeddings = self.proj(pixel_values)
+ _, _, height, width = embeddings.shape
+ # (batch_size, num_channels, height, width) -> (batch_size, num_channels, height*width) -> (batch_size, height*width, num_channels)
+ # this can be fed to a Transformer layer
+ embeddings = embeddings.flatten(2).transpose(1, 2)
+ embeddings = self.layer_norm(embeddings)
+ return embeddings, height, width
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerEfficientSelfAttention
+class GLPNEfficientSelfAttention(nn.Module):
+ """SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT
+ paper](https://arxiv.org/abs/2102.12122)."""
+
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
+ super().__init__()
+ self.hidden_size = hidden_size
+ self.num_attention_heads = num_attention_heads
+
+ if self.hidden_size % self.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({self.num_attention_heads})"
+ )
+
+ self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(self.hidden_size, self.all_head_size)
+ self.key = nn.Linear(self.hidden_size, self.all_head_size)
+ self.value = nn.Linear(self.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ self.sr_ratio = sequence_reduction_ratio
+ if sequence_reduction_ratio > 1:
+ self.sr = nn.Conv2d(
+ hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio
+ )
+ self.layer_norm = nn.LayerNorm(hidden_size)
+
+ def transpose_for_scores(self, hidden_states):
+ new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ hidden_states = hidden_states.view(new_shape)
+ return hidden_states.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states,
+ height,
+ width,
+ output_attentions=False,
+ ):
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
+
+ if self.sr_ratio > 1:
+ batch_size, seq_len, num_channels = hidden_states.shape
+ # Reshape to (batch_size, num_channels, height, width)
+ hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
+ # Apply sequence reduction
+ hidden_states = self.sr(hidden_states)
+ # Reshape back to (batch_size, seq_len, num_channels)
+ hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
+ hidden_states = self.layer_norm(hidden_states)
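+            # with reduction ratio r, the key/value sequence length shrinks by a factor of r**2,
+            # which shrinks the attention score matrix (and its cost) by the same factor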
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerSelfOutput
+class GLPNSelfOutput(nn.Module):
+ def __init__(self, config, hidden_size):
+ super().__init__()
+ self.dense = nn.Linear(hidden_size, hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerAttention with Segformer->GLPN
+class GLPNAttention(nn.Module):
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
+ super().__init__()
+ self.self = GLPNEfficientSelfAttention(
+ config=config,
+ hidden_size=hidden_size,
+ num_attention_heads=num_attention_heads,
+ sequence_reduction_ratio=sequence_reduction_ratio,
+ )
+ self.output = GLPNSelfOutput(config, hidden_size=hidden_size)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(self, hidden_states, height, width, output_attentions=False):
+ self_outputs = self.self(hidden_states, height, width, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerDWConv
+class GLPNDWConv(nn.Module):
+ def __init__(self, dim=768):
+ super().__init__()
+ self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
+
+ def forward(self, hidden_states, height, width):
+ batch_size, seq_len, num_channels = hidden_states.shape
+ hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
+ hidden_states = self.dwconv(hidden_states)
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
+
+ return hidden_states
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerMixFFN with Segformer->GLPN
+class GLPNMixFFN(nn.Module):
+ def __init__(self, config, in_features, hidden_features=None, out_features=None):
+ super().__init__()
+ out_features = out_features or in_features
+ self.dense1 = nn.Linear(in_features, hidden_features)
+ self.dwconv = GLPNDWConv(hidden_features)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.dense2 = nn.Linear(hidden_features, out_features)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, height, width):
+ hidden_states = self.dense1(hidden_states)
+ hidden_states = self.dwconv(hidden_states, height, width)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense2(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerLayer with Segformer->GLPN
+class GLPNLayer(nn.Module):
+ """This corresponds to the Block class in the original implementation."""
+
+ def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
+ super().__init__()
+ self.layer_norm_1 = nn.LayerNorm(hidden_size)
+ self.attention = GLPNAttention(
+ config,
+ hidden_size=hidden_size,
+ num_attention_heads=num_attention_heads,
+ sequence_reduction_ratio=sequence_reduction_ratio,
+ )
+ self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+ self.layer_norm_2 = nn.LayerNorm(hidden_size)
+ mlp_hidden_size = int(hidden_size * mlp_ratio)
+ self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
+
+ def forward(self, hidden_states, height, width, output_attentions=False):
+ self_attention_outputs = self.attention(
+ self.layer_norm_1(hidden_states), # in GLPN, layernorm is applied before self-attention
+ height,
+ width,
+ output_attentions=output_attentions,
+ )
+
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection (with stochastic depth)
+ attention_output = self.drop_path(attention_output)
+ hidden_states = attention_output + hidden_states
+
+ mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
+
+ # second residual connection (with stochastic depth)
+ mlp_output = self.drop_path(mlp_output)
+ layer_output = mlp_output + hidden_states
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+class GLPNEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ # stochastic depth decay rule
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
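+        # e.g. with drop_path_rate=0.1 and depths=[2, 2, 2, 2], dpr ramps linearly from 0.0 to 0.1 across the 8 layers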
+
+ # patch embeddings
+ embeddings = []
+ for i in range(config.num_encoder_blocks):
+ embeddings.append(
+ GLPNOverlapPatchEmbeddings(
+ patch_size=config.patch_sizes[i],
+ stride=config.strides[i],
+ num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
+ hidden_size=config.hidden_sizes[i],
+ )
+ )
+ self.patch_embeddings = nn.ModuleList(embeddings)
+
+ # Transformer blocks
+ blocks = []
+ cur = 0
+ for i in range(config.num_encoder_blocks):
+ # each block consists of layers
+ layers = []
+ if i != 0:
+ cur += config.depths[i - 1]
+ for j in range(config.depths[i]):
+ layers.append(
+ GLPNLayer(
+ config,
+ hidden_size=config.hidden_sizes[i],
+ num_attention_heads=config.num_attention_heads[i],
+ drop_path=dpr[cur + j],
+ sequence_reduction_ratio=config.sr_ratios[i],
+ mlp_ratio=config.mlp_ratios[i],
+ )
+ )
+ blocks.append(nn.ModuleList(layers))
+
+ self.block = nn.ModuleList(blocks)
+
+ # Layer norms
+ self.layer_norm = nn.ModuleList(
+ [nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)]
+ )
+
+ def forward(
+ self,
+ pixel_values,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ batch_size = pixel_values.shape[0]
+
+ hidden_states = pixel_values
+ for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
+ embedding_layer, block_layer, norm_layer = x
+ # first, obtain patch embeddings
+ hidden_states, height, width = embedding_layer(hidden_states)
+ # second, send embeddings through blocks
+ for i, blk in enumerate(block_layer):
+ layer_outputs = blk(hidden_states, height, width, output_attentions)
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ # third, apply layer norm
+ hidden_states = norm_layer(hidden_states)
+ # fourth, optionally reshape back to (batch_size, num_channels, height, width)
+ hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class GLPNPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GLPNConfig
+ base_model_prefix = "glpn"
+ main_input_name = "pixel_values"
+
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerPreTrainedModel._init_weights
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+GLPN_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`GLPNConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GLPN_INPUTS_DOCSTRING = r"""
+
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`GLPNImageProcessor.__call__`] for details.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.",
+ GLPN_START_DOCSTRING,
+)
+class GLPNModel(GLPNPreTrainedModel):
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.__init__ with Segformer->GLPN
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ # hierarchical Transformer encoder
+ self.encoder = GLPNEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _prune_heads(self, heads_to_prune):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+        base class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.forward
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_outputs = self.encoder(
+ pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class GLPNSelectiveFeatureFusion(nn.Module):
+ """
+ Selective Feature Fusion module, as explained in the [paper](https://arxiv.org/abs/2201.07436) (section 3.4). This
+    module adaptively selects and integrates local and global features by obtaining an attention map for each feature.
+ """
+
+ def __init__(self, in_channel=64):
+ super().__init__()
+
+ self.convolutional_layer1 = nn.Sequential(
+ nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(in_channel),
+ nn.ReLU(),
+ )
+
+ self.convolutional_layer2 = nn.Sequential(
+ nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(int(in_channel / 2)),
+ nn.ReLU(),
+ )
+
+ self.convolutional_layer3 = nn.Conv2d(
+ in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1
+ )
+
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, local_features, global_features):
+ # concatenate features along the channel dimension
+ features = torch.cat((local_features, global_features), dim=1)
+ # pass through convolutional layers
+ features = self.convolutional_layer1(features)
+ features = self.convolutional_layer2(features)
+ features = self.convolutional_layer3(features)
+ # apply sigmoid to get two-channel attention map
+ attn = self.sigmoid(features)
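+        # attn[:, 0] weights the local features and attn[:, 1] the global features, pixel-wise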
+ # construct hybrid features by adding element-wise
+ hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[
+ :, 1, :, :
+ ].unsqueeze(1)
+
+ return hybrid_features
+
+
+class GLPNDecoderStage(nn.Module):
+ def __init__(self, in_channels, out_channels):
+ super().__init__()
+ should_skip = in_channels == out_channels
+ self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1) if not should_skip else nn.Identity()
+ self.fusion = GLPNSelectiveFeatureFusion(out_channels)
+ self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
+
+ def forward(self, hidden_state, residual=None):
+ hidden_state = self.convolution(hidden_state)
+ if residual is not None:
+ hidden_state = self.fusion(hidden_state, residual)
+ hidden_state = self.upsample(hidden_state)
+
+ return hidden_state
+
+
+class GLPNDecoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ # we use features from end -> start
+ reserved_hidden_sizes = config.hidden_sizes[::-1]
+ out_channels = config.decoder_hidden_size
+
+ self.stages = nn.ModuleList(
+ [GLPNDecoderStage(hidden_size, out_channels) for hidden_size in reserved_hidden_sizes]
+ )
+ # don't fuse in first stage
+ self.stages[0].fusion = None
+
+ self.final_upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
+
+ def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]:
+ stage_hidden_states = []
+ stage_hidden_state = None
+ for hidden_state, stage in zip(hidden_states[::-1], self.stages):
+ stage_hidden_state = stage(hidden_state, stage_hidden_state)
+ stage_hidden_states.append(stage_hidden_state)
+
+ stage_hidden_states[-1] = self.final_upsample(stage_hidden_state)
+
+ return stage_hidden_states
+
+
+class SiLogLoss(nn.Module):
+ r"""
+ Implements the Scale-invariant log scale loss [Eigen et al., 2014](https://arxiv.org/abs/1406.2283).
+
+    $$L=\sqrt{\frac{1}{n} \sum_{i} d_{i}^{2}-\frac{\lambda}{n^{2}}\left(\sum_{i} d_{i}\right)^{2}}$$ where
+    $d_{i}=\log y_{i}-\log y_{i}^{*}$ and $\lambda$ is the weighting factor `lambd` (0.5 by default).
+
+ """
+
+ def __init__(self, lambd=0.5):
+ super().__init__()
+ self.lambd = lambd
+
+ def forward(self, pred, target):
+ valid_mask = (target > 0).detach()
+ diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
+ loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2))
+
+ return loss
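+
+
+# Minimal usage sketch (the tensor shapes and values are illustrative):
+#   loss_fct = SiLogLoss()
+#   loss = loss_fct(pred=torch.rand(2, 48, 64) + 0.1, target=torch.rand(2, 48, 64) + 0.1)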
+
+
+class GLPNDepthEstimationHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ self.config = config
+
+ channels = config.decoder_hidden_size
+ self.head = nn.Sequential(
+ nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(inplace=False),
+ nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1),
+ )
+
+ def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor:
+ # use last features of the decoder
+ hidden_states = hidden_states[self.config.head_in_index]
+
+ hidden_states = self.head(hidden_states)
+
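+        # the sigmoid bounds the raw output to (0, 1); scaling by max_depth yields depths in (0, config.max_depth)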
+ predicted_depth = torch.sigmoid(hidden_states) * self.config.max_depth
+ predicted_depth = predicted_depth.squeeze(dim=1)
+
+ return predicted_depth
+
+
+@add_start_docstrings(
+ """GLPN Model transformer with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.""",
+ GLPN_START_DOCSTRING,
+)
+class GLPNForDepthEstimation(GLPNPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.glpn = GLPNModel(config)
+ self.decoder = GLPNDecoder(config)
+ self.head = GLPNDepthEstimationHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ labels: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:
+ r"""
+ labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
+ Ground truth depth estimation maps for computing the loss.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, GLPNForDepthEstimation
+ >>> import torch
+ >>> import numpy as np
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti")
+ >>> model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
+
+ >>> # prepare image for the model
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**inputs)
+ ... predicted_depth = outputs.predicted_depth
+
+ >>> # interpolate to original size
+ >>> prediction = torch.nn.functional.interpolate(
+ ... predicted_depth.unsqueeze(1),
+ ... size=image.size[::-1],
+ ... mode="bicubic",
+ ... align_corners=False,
+ ... )
+
+ >>> # visualize the prediction
+ >>> output = prediction.squeeze().cpu().numpy()
+ >>> formatted = (output * 255 / np.max(output)).astype("uint8")
+ >>> depth = Image.fromarray(formatted)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ outputs = self.glpn(
+ pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=True, # we need the intermediate hidden states
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ out = self.decoder(hidden_states)
+ predicted_depth = self.head(out)
+
+ loss = None
+ if labels is not None:
+ loss_fct = SiLogLoss()
+ loss = loss_fct(predicted_depth, labels)
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (predicted_depth,) + outputs[1:]
+ else:
+ output = (predicted_depth,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return DepthEstimatorOutput(
+ loss=loss,
+ predicted_depth=predicted_depth,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=outputs.attentions,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5615c622186299a304eed40755677dddc5892996
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__init__.py
@@ -0,0 +1,110 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_torch_available,
+ is_vision_available,
+)
+
+
+_import_structure = {
+ "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
+ _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mobilevit"] = [
+ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MobileViTForImageClassification",
+ "MobileViTForSemanticSegmentation",
+ "MobileViTModel",
+ "MobileViTPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_mobilevit"] = [
+ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFMobileViTForImageClassification",
+ "TFMobileViTForSemanticSegmentation",
+ "TFMobileViTModel",
+ "TFMobileViTPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_mobilevit import MobileViTFeatureExtractor
+ from .image_processing_mobilevit import MobileViTImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mobilevit import (
+ MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ MobileViTForImageClassification,
+ MobileViTForSemanticSegmentation,
+ MobileViTModel,
+ MobileViTPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_mobilevit import (
+ TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFMobileViTForImageClassification,
+ TFMobileViTForSemanticSegmentation,
+ TFMobileViTModel,
+ TFMobileViTPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6558dc12bd4f8670f8061b18d8f38fd86467137c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73d7a3909c3e7896155248be6d87e78f8a3d6c1c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ce2cdc474bdeb3b04f3be0cc0a75872cf638ccd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/feature_extraction_mobilevit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/feature_extraction_mobilevit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..085210360eb6123dcb3f30c370e76c2e107b13bd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/feature_extraction_mobilevit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4949b38976a120ec31fd0e60d6adf44091af6d2d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/image_processing_mobilevit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d370e1ecbebce56393784047b19684176119f03b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12602d88b38a1e164340904982b8da307bb54af3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_tf_mobilevit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/configuration_mobilevit.py b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/configuration_mobilevit.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f13112447f11331d0b801061407b4353a0bf609
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/configuration_mobilevit.py
@@ -0,0 +1,172 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MobileViT model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class MobileViTConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a
+ MobileViT model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the MobileViT
+ [apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ image_size (`int`, *optional*, defaults to 256):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 2):
+ The size (resolution) of each patch.
+ hidden_sizes (`List[int]`, *optional*, defaults to `[144, 192, 240]`):
+ Dimensionality (hidden size) of the Transformer encoders at each stage.
+ neck_hidden_sizes (`List[int]`, *optional*, defaults to `[16, 32, 64, 96, 128, 160, 640]`):
+ The number of channels for the feature maps of the backbone.
+ num_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ mlp_ratio (`float`, *optional*, defaults to 2.0):
+ The ratio of the number of channels in the output of the MLP to the number of channels in the input.
+ expand_ratio (`float`, *optional*, defaults to 4.0):
+ Expansion factor for the MobileNetv2 layers.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
+ conv_kernel_size (`int`, *optional*, defaults to 3):
+ The size of the convolutional kernel in the MobileViT layer.
+ output_stride (`int`, *optional*, defaults to 32):
+ The ratio of the spatial resolution of the output to the resolution of the input image.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the Transformer encoder.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for attached classifiers.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ aspp_out_channels (`int`, *optional*, defaults to 256):
+ Number of output channels used in the ASPP layer for semantic segmentation.
+ atrous_rates (`List[int]`, *optional*, defaults to `[6, 12, 18]`):
+ Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
+ aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the ASPP layer for semantic segmentation.
+ semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
+ The index that is ignored by the loss function of the semantic segmentation model.
+
+ Example:
+
+ ```python
+ >>> from transformers import MobileViTConfig, MobileViTModel
+
+ >>> # Initializing a mobilevit-small style configuration
+ >>> configuration = MobileViTConfig()
+
+ >>> # Initializing a model from the mobilevit-small style configuration
+ >>> model = MobileViTModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "mobilevit"
+
+ def __init__(
+ self,
+ num_channels=3,
+ image_size=256,
+ patch_size=2,
+ hidden_sizes=[144, 192, 240],
+ neck_hidden_sizes=[16, 32, 64, 96, 128, 160, 640],
+ num_attention_heads=4,
+ mlp_ratio=2.0,
+ expand_ratio=4.0,
+ hidden_act="silu",
+ conv_kernel_size=3,
+ output_stride=32,
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.0,
+ classifier_dropout_prob=0.1,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ qkv_bias=True,
+ aspp_out_channels=256,
+ atrous_rates=[6, 12, 18],
+ aspp_dropout_prob=0.1,
+ semantic_loss_ignore_index=255,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.num_channels = num_channels
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.hidden_sizes = hidden_sizes
+ self.neck_hidden_sizes = neck_hidden_sizes
+ self.num_attention_heads = num_attention_heads
+ self.mlp_ratio = mlp_ratio
+ self.expand_ratio = expand_ratio
+ self.hidden_act = hidden_act
+ self.conv_kernel_size = conv_kernel_size
+ self.output_stride = output_stride
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.classifier_dropout_prob = classifier_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.qkv_bias = qkv_bias
+
+ # decode head attributes for semantic segmentation
+ self.aspp_out_channels = aspp_out_channels
+ self.atrous_rates = atrous_rates
+ self.aspp_dropout_prob = aspp_dropout_prob
+ self.semantic_loss_ignore_index = semantic_loss_ignore_index
+
+
+class MobileViTOnnxConfig(OnnxConfig):
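+    """ONNX export configuration for MobileViT, declaring the dynamic input and output axes used during export."""
+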
+ torch_onnx_minimum_version = version.parse("1.11")
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])
+
+ @property
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "image-classification":
+ return OrderedDict([("logits", {0: "batch"})])
+ else:
+ return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..e251b124b4650dabd80ee8d6393018eda789e3f3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py
@@ -0,0 +1,312 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert MobileViT checkpoints from the ml-cvnets library."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import (
+ MobileViTConfig,
+ MobileViTForImageClassification,
+ MobileViTForSemanticSegmentation,
+ MobileViTImageProcessor,
+)
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_mobilevit_config(mobilevit_name):
+ config = MobileViTConfig()
+
+ # size of the architecture
+ if "mobilevit_s" in mobilevit_name:
+ config.hidden_sizes = [144, 192, 240]
+ config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
+ elif "mobilevit_xs" in mobilevit_name:
+ config.hidden_sizes = [96, 120, 144]
+ config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
+ elif "mobilevit_xxs" in mobilevit_name:
+ config.hidden_sizes = [64, 80, 96]
+ config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
+ config.hidden_dropout_prob = 0.05
+ config.expand_ratio = 2.0
+
+ if mobilevit_name.startswith("deeplabv3_"):
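+        # DeepLabV3 variants are semantic segmentation checkpoints trained on PASCAL VOC (21 classes, 512x512 inputs)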
+ config.image_size = 512
+ config.output_stride = 16
+ config.num_labels = 21
+ filename = "pascal-voc-id2label.json"
+ else:
+ config.num_labels = 1000
+ filename = "imagenet-1k-id2label.json"
+
+ repo_id = "huggingface/label-files"
+    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
+        id2label = json.load(f)
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ return config
+
+
+def rename_key(name, base_model=False):
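+    """Map a parameter name from the original ml-cvnets checkpoint to the Hugging Face MobileViT naming scheme."""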
+ for i in range(1, 6):
+ if f"layer_{i}." in name:
+ name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
+
+ if "conv_1." in name:
+ name = name.replace("conv_1.", "conv_stem.")
+ if ".block." in name:
+ name = name.replace(".block.", ".")
+
+ if "exp_1x1" in name:
+ name = name.replace("exp_1x1", "expand_1x1")
+ if "red_1x1" in name:
+ name = name.replace("red_1x1", "reduce_1x1")
+ if ".local_rep.conv_3x3." in name:
+ name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
+ if ".local_rep.conv_1x1." in name:
+ name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
+ if ".norm." in name:
+ name = name.replace(".norm.", ".normalization.")
+ if ".conv." in name:
+ name = name.replace(".conv.", ".convolution.")
+ if ".conv_proj." in name:
+ name = name.replace(".conv_proj.", ".conv_projection.")
+
+ for i in range(0, 2):
+ for j in range(0, 4):
+ if f".{i}.{j}." in name:
+ name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
+
+ for i in range(2, 6):
+ for j in range(0, 4):
+ if f".{i}.{j}." in name:
+ name = name.replace(f".{i}.{j}.", f".{i}.")
+ if "expand_1x1" in name:
+ name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
+ if "conv_3x3" in name:
+ name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
+ if "reduce_1x1" in name:
+ name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")
+
+ for i in range(2, 5):
+ if f".global_rep.{i}.weight" in name:
+ name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
+ if f".global_rep.{i}.bias" in name:
+ name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
+
+ if ".global_rep." in name:
+ name = name.replace(".global_rep.", ".transformer.")
+ if ".pre_norm_mha.0." in name:
+ name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
+ if ".pre_norm_mha.1.out_proj." in name:
+ name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
+ if ".pre_norm_ffn.0." in name:
+ name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
+ if ".pre_norm_ffn.1." in name:
+ name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
+ if ".pre_norm_ffn.4." in name:
+ name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
+ if ".transformer." in name:
+ name = name.replace(".transformer.", ".transformer.layer.")
+
+ if ".aspp_layer." in name:
+ name = name.replace(".aspp_layer.", ".")
+ if ".aspp_pool." in name:
+ name = name.replace(".aspp_pool.", ".")
+ if "seg_head." in name:
+ name = name.replace("seg_head.", "segmentation_head.")
+ if "segmentation_head.classifier.classifier." in name:
+ name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
+
+ if "classifier.fc." in name:
+ name = name.replace("classifier.fc.", "classifier.")
+ elif (not base_model) and ("segmentation_head." not in name):
+ name = "mobilevit." + name
+
+ return name
+
+
+def convert_state_dict(orig_state_dict, model, base_model=False):
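+    """Rename all keys of the original state dict and split fused qkv projections to match the Hugging Face model."""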
+ if base_model:
+ model_prefix = ""
+ else:
+ model_prefix = "mobilevit."
+
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if key[:8] == "encoder.":
+ key = key[8:]
+
+ if "qkv" in key:
+ key_split = key.split(".")
+ layer_num = int(key_split[0][6:]) - 1
+ transformer_num = int(key_split[3])
+ layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
+ dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
+ prefix = (
+ f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
+ )
+ if "weight" in key:
+ orig_state_dict[prefix + "query.weight"] = val[:dim, :]
+ orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
+ orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
+ else:
+ orig_state_dict[prefix + "query.bias"] = val[:dim]
+ orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
+ orig_state_dict[prefix + "value.bias"] = val[-dim:]
+ else:
+ orig_state_dict[rename_key(key, base_model)] = val
+
+ return orig_state_dict
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+@torch.no_grad()
+def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
+ """
+ Copy/paste/tweak model's weights to our MobileViT structure.
+ """
+ config = get_mobilevit_config(mobilevit_name)
+
+ # load original state_dict
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
+
+ # load 🤗 model
+ if mobilevit_name.startswith("deeplabv3_"):
+ model = MobileViTForSemanticSegmentation(config).eval()
+ else:
+ model = MobileViTForImageClassification(config).eval()
+
+ new_state_dict = convert_state_dict(state_dict, model)
+ model.load_state_dict(new_state_dict)
+
+ # Check outputs on an image, prepared by MobileViTImageProcessor
+ image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
+ outputs = model(**encoding)
+ logits = outputs.logits
+
+ if mobilevit_name.startswith("deeplabv3_"):
+ assert logits.shape == (1, 21, 32, 32)
+
+ if mobilevit_name == "deeplabv3_mobilevit_s":
+ expected_logits = torch.tensor(
+ [
+ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
+ [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
+ [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
+ ]
+ )
+ elif mobilevit_name == "deeplabv3_mobilevit_xs":
+ expected_logits = torch.tensor(
+ [
+ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
+ [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
+ [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
+ ]
+ )
+ elif mobilevit_name == "deeplabv3_mobilevit_xxs":
+ expected_logits = torch.tensor(
+ [
+ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
+ [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
+ [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
+ ]
+ )
+ else:
+ raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
+
+ assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
+ else:
+ assert logits.shape == (1, 1000)
+
+ if mobilevit_name == "mobilevit_s":
+ expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
+ elif mobilevit_name == "mobilevit_xs":
+ expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
+ elif mobilevit_name == "mobilevit_xxs":
+ expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
+ else:
+ raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
+
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ model_mapping = {
+ "mobilevit_s": "mobilevit-small",
+ "mobilevit_xs": "mobilevit-x-small",
+ "mobilevit_xxs": "mobilevit-xx-small",
+ "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
+ "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
+ "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
+ }
+
+ print("Pushing to the hub...")
+ model_name = model_mapping[mobilevit_name]
+ image_processor.push_to_hub(model_name, organization="apple")
+ model.push_to_hub(model_name, organization="apple")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--mobilevit_name",
+ default="mobilevit_s",
+ type=str,
+ help=(
+ "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
+ " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
+ ),
+ )
+ parser.add_argument(
+ "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
+ )
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+    convert_mobilevit_checkpoint(
+ args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/feature_extraction_mobilevit.py b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/feature_extraction_mobilevit.py
new file mode 100644
index 0000000000000000000000000000000000000000..a73baed6405c50339a7bb024348a6f417770bf20
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/feature_extraction_mobilevit.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for MobileViT."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_mobilevit import MobileViTImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class MobileViTFeatureExtractor(MobileViTImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use MobileViTImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/image_processing_mobilevit.py b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/image_processing_mobilevit.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cc79a283e05af739885f578b4bbb7f7abe878ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/image_processing_mobilevit.py
@@ -0,0 +1,493 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for MobileViT."""
+
+from typing import Dict, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import flip_channel_order, get_resize_output_image_size, resize, to_channel_dimension_format
+from ...image_utils import (
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+if is_torch_available():
+ import torch
+
+
+logger = logging.get_logger(__name__)
+
+
+class MobileViTImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a MobileViT image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
+ Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the
+ `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
+ in the `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
+ image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in
+ the `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
+            Desired output size `(crop_size["height"], crop_size["width"])` when applying center-cropping. Can be
+            overridden by the `crop_size` parameter in the `preprocess` method.
+ do_flip_channel_order (`bool`, *optional*, defaults to `True`):
+ Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
+ parameter in the `preprocess` method.
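+
+    Example (a minimal sketch that runs the processor with its default settings on a randomly generated image):
+
+    ```python
+    >>> import numpy as np
+    >>> from PIL import Image
+    >>> from transformers import MobileViTImageProcessor
+
+    >>> image_processor = MobileViTImageProcessor()
+    >>> image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
+    >>> inputs = image_processor(images=image, return_tensors="pt")
+    >>> inputs["pixel_values"].shape
+    torch.Size([1, 3, 256, 256])
+    ```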
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ do_flip_channel_order: bool = True,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"shortest_edge": 224}
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.do_flip_channel_order = do_flip_channel_order
+ self._valid_processor_keys = [
+ "images",
+ "segmentation_maps",
+ "do_resize",
+ "size",
+ "resample",
+ "do_rescale",
+ "rescale_factor",
+ "do_center_crop",
+ "crop_size",
+ "do_flip_channel_order",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ # Copied from transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor.resize with PILImageResampling.BICUBIC->PILImageResampling.BILINEAR
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
+ resized to keep the input aspect ratio.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+                Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ default_to_square = True
+ if "shortest_edge" in size:
+ size = size["shortest_edge"]
+ default_to_square = False
+ elif "height" in size and "width" in size:
+ size = (size["height"], size["width"])
+ else:
+ raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
+
+ output_size = get_resize_output_image_size(
+ image,
+ size=size,
+ default_to_square=default_to_square,
+ input_data_format=input_data_format,
+ )
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def flip_channel_order(
+ self,
+ image: np.ndarray,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Flip the color channels from RGB to BGR or vice versa.
+
+ Args:
+ image (`np.ndarray`):
+ The image, represented as a numpy array.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ return flip_channel_order(image, data_format=data_format, input_data_format=input_data_format)
+
+ def __call__(self, images, segmentation_maps=None, **kwargs):
+ """
+ Preprocesses a batch of images and optionally segmentation maps.
+
+        Overrides the `__call__` method of `BaseImageProcessor` so that both images and segmentation maps can be
+ passed in as positional arguments.
+ """
+ return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
+
+ def _preprocess(
+ self,
+ image: ImageInput,
+ do_resize: bool,
+ do_rescale: bool,
+ do_center_crop: bool,
+ do_flip_channel_order: bool,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = None,
+ rescale_factor: Optional[float] = None,
+ crop_size: Optional[Dict[str, int]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_center_crop:
+ image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
+
+ if do_flip_channel_order:
+ image = self.flip_channel_order(image, input_data_format=input_data_format)
+
+ return image
+
+ def _preprocess_image(
+ self,
+ image: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_flip_channel_order: bool = None,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """Preprocesses a single image."""
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+ if is_scaled_image(image) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ image = self._preprocess(
+ image=image,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_flip_channel_order=do_flip_channel_order,
+ input_data_format=input_data_format,
+ )
+
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+
+ return image
+
+ def _preprocess_mask(
+ self,
+ segmentation_map: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """Preprocesses a single mask."""
+ segmentation_map = to_numpy_array(segmentation_map)
+ # Add channel dimension if missing - needed for certain transformations
+ if segmentation_map.ndim == 2:
+ added_channel_dim = True
+ segmentation_map = segmentation_map[None, ...]
+ input_data_format = ChannelDimension.FIRST
+ else:
+ added_channel_dim = False
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
+
+ segmentation_map = self._preprocess(
+ image=segmentation_map,
+ do_resize=do_resize,
+ size=size,
+ resample=PILImageResampling.NEAREST,
+ do_rescale=False,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_flip_channel_order=False,
+ input_data_format=input_data_format,
+ )
+ # Remove extra channel dimension if added for processing
+ if added_channel_dim:
+ segmentation_map = segmentation_map.squeeze(0)
+ segmentation_map = segmentation_map.astype(np.int64)
+ return segmentation_map
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ segmentation_maps: Optional[ImageInput] = None,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_flip_channel_order: bool = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+    ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ segmentation_maps (`ImageInput`, *optional*):
+ Segmentation map to preprocess.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing.
+ resample (`int`, *optional*, defaults to `self.resample`):
+                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image by rescale factor.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the center crop if `do_center_crop` is set to `True`.
+ do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
+ Whether to flip the channel order of the image.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ resample = resample if resample is not None else self.resample
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ do_flip_channel_order = (
+ do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
+ )
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if segmentation_maps is not None:
+ segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ if segmentation_maps is not None and not valid_images(segmentation_maps):
+ raise ValueError(
+ "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ images = [
+ self._preprocess_image(
+ image=img,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_flip_channel_order=do_flip_channel_order,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for img in images
+ ]
+
+ data = {"pixel_values": images}
+
+ if segmentation_maps is not None:
+ segmentation_maps = [
+ self._preprocess_mask(
+ segmentation_map=segmentation_map,
+ do_resize=do_resize,
+ size=size,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ input_data_format=input_data_format,
+ )
+ for segmentation_map in segmentation_maps
+ ]
+
+ data["labels"] = segmentation_maps
+
+ return BatchFeature(data=data, tensor_type=return_tensors)
+
+ # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileViT
+ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
+ """
+ Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
+
+ Args:
+ outputs ([`MobileViTForSemanticSegmentation`]):
+ Raw outputs of the model.
+ target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
+ List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
+ predictions will not be resized.
+
+ Returns:
+ semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
+ segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
+            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
+ """
+ # TODO: add support for other frameworks
+ logits = outputs.logits
+
+ # Resize logits and compute semantic segmentation maps
+ if target_sizes is not None:
+ if len(logits) != len(target_sizes):
+ raise ValueError(
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
+ )
+
+ if is_torch_tensor(target_sizes):
+ target_sizes = target_sizes.numpy()
+
+ semantic_segmentation = []
+
+ for idx in range(len(logits)):
+ resized_logits = torch.nn.functional.interpolate(
+ logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
+ )
+ semantic_map = resized_logits[0].argmax(dim=0)
+ semantic_segmentation.append(semantic_map)
+ else:
+ semantic_segmentation = logits.argmax(dim=1)
+ semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
+
+ return semantic_segmentation
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_mobilevit.py b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_mobilevit.py
new file mode 100644
index 0000000000000000000000000000000000000000..939982148cc606c20d10f6e79b74a66130b17024
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_mobilevit.py
@@ -0,0 +1,1066 @@
+# coding=utf-8
+# Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE
+""" PyTorch MobileViT model."""
+
+
+import math
+from typing import Dict, Optional, Set, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithNoAttention,
+ BaseModelOutputWithPoolingAndNoAttention,
+ ImageClassifierOutputWithNoAttention,
+ SemanticSegmenterOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_mobilevit import MobileViTConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+# General docstring
+_CONFIG_FOR_DOC = "MobileViTConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "apple/mobilevit-small"
+_EXPECTED_OUTPUT_SHAPE = [1, 640, 8, 8]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "apple/mobilevit-small"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+
+from ..deprecated._archive_maps import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
+ """
+ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
+ original TensorFlow repo. It can be seen here:
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+ """
+ if min_value is None:
+ min_value = divisor
+ new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
+    # Make sure that rounding down does not decrease the value by more than 10%.
+ if new_value < 0.9 * value:
+ new_value += divisor
+ return int(new_value)
+
+
+class MobileViTConvLayer(nn.Module):
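+    """Convolution followed by optional batch normalization and an optional activation function."""
+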
+ def __init__(
+ self,
+ config: MobileViTConfig,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: int,
+ stride: int = 1,
+ groups: int = 1,
+ bias: bool = False,
+ dilation: int = 1,
+ use_normalization: bool = True,
+ use_activation: Union[bool, str] = True,
+ ) -> None:
+ super().__init__()
+ padding = int((kernel_size - 1) / 2) * dilation
+
+ if in_channels % groups != 0:
+ raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
+ if out_channels % groups != 0:
+ raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
+
+ self.convolution = nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups,
+ bias=bias,
+ padding_mode="zeros",
+ )
+
+ if use_normalization:
+ self.normalization = nn.BatchNorm2d(
+ num_features=out_channels,
+ eps=1e-5,
+ momentum=0.1,
+ affine=True,
+ track_running_stats=True,
+ )
+ else:
+ self.normalization = None
+
+ if use_activation:
+ if isinstance(use_activation, str):
+ self.activation = ACT2FN[use_activation]
+ elif isinstance(config.hidden_act, str):
+ self.activation = ACT2FN[config.hidden_act]
+ else:
+ self.activation = config.hidden_act
+ else:
+ self.activation = None
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ features = self.convolution(features)
+ if self.normalization is not None:
+ features = self.normalization(features)
+ if self.activation is not None:
+ features = self.activation(features)
+ return features
+
+
+class MobileViTInvertedResidual(nn.Module):
+ """
+ Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381
+ """
+
+ def __init__(
+ self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int = 1
+ ) -> None:
+ super().__init__()
+ expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
+
+ if stride not in [1, 2]:
+ raise ValueError(f"Invalid stride {stride}.")
+
+ self.use_residual = (stride == 1) and (in_channels == out_channels)
+
+ self.expand_1x1 = MobileViTConvLayer(
+ config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
+ )
+
+ self.conv_3x3 = MobileViTConvLayer(
+ config,
+ in_channels=expanded_channels,
+ out_channels=expanded_channels,
+ kernel_size=3,
+ stride=stride,
+ groups=expanded_channels,
+ dilation=dilation,
+ )
+
+ self.reduce_1x1 = MobileViTConvLayer(
+ config,
+ in_channels=expanded_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ use_activation=False,
+ )
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ residual = features
+
+ features = self.expand_1x1(features)
+ features = self.conv_3x3(features)
+ features = self.reduce_1x1(features)
+
+ return residual + features if self.use_residual else features
+
+
+class MobileViTMobileNetLayer(nn.Module):
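+    """Stack of MobileNetV2-style inverted residual blocks; only the first block applies the requested stride."""
+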
+ def __init__(
+ self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1
+ ) -> None:
+ super().__init__()
+
+ self.layer = nn.ModuleList()
+ for i in range(num_stages):
+ layer = MobileViTInvertedResidual(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ stride=stride if i == 0 else 1,
+ )
+ self.layer.append(layer)
+ in_channels = out_channels
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ for layer_module in self.layer:
+ features = layer_module(features)
+ return features
+
+
+class MobileViTSelfAttention(nn.Module):
+ def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
+ super().__init__()
+
+ if hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size {hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+ return context_layer
+
+
+class MobileViTSelfOutput(nn.Module):
+ def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
+ super().__init__()
+ self.dense = nn.Linear(hidden_size, hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class MobileViTAttention(nn.Module):
+ def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
+ super().__init__()
+ self.attention = MobileViTSelfAttention(config, hidden_size)
+ self.output = MobileViTSelfOutput(config, hidden_size)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads: Set[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ self_outputs = self.attention(hidden_states)
+ attention_output = self.output(self_outputs)
+ return attention_output
+
+
+class MobileViTIntermediate(nn.Module):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
+ super().__init__()
+ self.dense = nn.Linear(hidden_size, intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class MobileViTOutput(nn.Module):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
+ super().__init__()
+ self.dense = nn.Linear(intermediate_size, hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = hidden_states + input_tensor
+ return hidden_states
+
+
+class MobileViTTransformerLayer(nn.Module):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
+ super().__init__()
+ self.attention = MobileViTAttention(config, hidden_size)
+ self.intermediate = MobileViTIntermediate(config, hidden_size, intermediate_size)
+ self.output = MobileViTOutput(config, hidden_size, intermediate_size)
+ self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
+ self.layernorm_after = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ attention_output = self.attention(self.layernorm_before(hidden_states))
+ hidden_states = attention_output + hidden_states
+
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+ layer_output = self.output(layer_output, hidden_states)
+ return layer_output
+
+
+class MobileViTTransformer(nn.Module):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int) -> None:
+ super().__init__()
+
+ self.layer = nn.ModuleList()
+ for _ in range(num_stages):
+ transformer_layer = MobileViTTransformerLayer(
+ config,
+ hidden_size=hidden_size,
+ intermediate_size=int(hidden_size * config.mlp_ratio),
+ )
+ self.layer.append(transformer_layer)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ for layer_module in self.layer:
+ hidden_states = layer_module(hidden_states)
+ return hidden_states
+
+
+class MobileViTLayer(nn.Module):
+ """
+ MobileViT block: https://arxiv.org/abs/2110.02178
+ """
+
+ def __init__(
+ self,
+ config: MobileViTConfig,
+ in_channels: int,
+ out_channels: int,
+ stride: int,
+ hidden_size: int,
+ num_stages: int,
+ dilation: int = 1,
+ ) -> None:
+ super().__init__()
+ self.patch_width = config.patch_size
+ self.patch_height = config.patch_size
+
+ if stride == 2:
+ self.downsampling_layer = MobileViTInvertedResidual(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ stride=stride if dilation == 1 else 1,
+ dilation=dilation // 2 if dilation > 1 else 1,
+ )
+ in_channels = out_channels
+ else:
+ self.downsampling_layer = None
+
+ self.conv_kxk = MobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=in_channels,
+ kernel_size=config.conv_kernel_size,
+ )
+
+ self.conv_1x1 = MobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=hidden_size,
+ kernel_size=1,
+ use_normalization=False,
+ use_activation=False,
+ )
+
+ self.transformer = MobileViTTransformer(
+ config,
+ hidden_size=hidden_size,
+ num_stages=num_stages,
+ )
+
+ self.layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
+
+ self.conv_projection = MobileViTConvLayer(
+ config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1
+ )
+
+ self.fusion = MobileViTConvLayer(
+ config, in_channels=2 * in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size
+ )
+
+ def unfolding(self, features: torch.Tensor) -> Tuple[torch.Tensor, Dict]:
+ patch_width, patch_height = self.patch_width, self.patch_height
+ patch_area = int(patch_width * patch_height)
+
+ batch_size, channels, orig_height, orig_width = features.shape
+
+ new_height = int(math.ceil(orig_height / patch_height) * patch_height)
+ new_width = int(math.ceil(orig_width / patch_width) * patch_width)
+
+ interpolate = False
+ if new_width != orig_width or new_height != orig_height:
+            # Note: Padding can be done, but then it needs to be handled in the attention function.
+ features = nn.functional.interpolate(
+ features, size=(new_height, new_width), mode="bilinear", align_corners=False
+ )
+ interpolate = True
+
+ # number of patches along width and height
+ num_patch_width = new_width // patch_width
+ num_patch_height = new_height // patch_height
+ num_patches = num_patch_height * num_patch_width
+
+ # convert from shape (batch_size, channels, orig_height, orig_width)
+ # to the shape (batch_size * patch_area, num_patches, channels)
+ patches = features.reshape(
+ batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width
+ )
+ patches = patches.transpose(1, 2)
+ patches = patches.reshape(batch_size, channels, num_patches, patch_area)
+ patches = patches.transpose(1, 3)
+ patches = patches.reshape(batch_size * patch_area, num_patches, -1)
+
+ info_dict = {
+ "orig_size": (orig_height, orig_width),
+ "batch_size": batch_size,
+ "channels": channels,
+ "interpolate": interpolate,
+ "num_patches": num_patches,
+ "num_patches_width": num_patch_width,
+ "num_patches_height": num_patch_height,
+ }
+ return patches, info_dict
+
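+    # Illustrative shape trace for `unfolding`, assuming a hypothetical input of shape
+    # (batch_size=1, channels=2, height=4, width=4) with patch_width = patch_height = 2,
+    # so num_patches = 4 and patch_area = 4:
+    #   (1, 2, 4, 4) -> reshape   -> (4, 2, 2, 2)   # (batch * channels * num_patch_h, patch_h, num_patch_w, patch_w)
+    #                -> transpose -> (4, 2, 2, 2)   # swaps the patch_h and num_patch_w axes
+    #                -> reshape   -> (1, 2, 4, 4)   # (batch, channels, num_patches, patch_area)
+    #                -> transpose -> (1, 4, 4, 2)   # (batch, patch_area, num_patches, channels)
+    #                -> reshape   -> (4, 4, 2)      # (batch * patch_area, num_patches, channels)
+    # `folding` below applies the inverse transposes and reshapes.
+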
+ def folding(self, patches: torch.Tensor, info_dict: Dict) -> torch.Tensor:
+ patch_width, patch_height = self.patch_width, self.patch_height
+ patch_area = int(patch_width * patch_height)
+
+ batch_size = info_dict["batch_size"]
+ channels = info_dict["channels"]
+ num_patches = info_dict["num_patches"]
+ num_patch_height = info_dict["num_patches_height"]
+ num_patch_width = info_dict["num_patches_width"]
+
+ # convert from shape (batch_size * patch_area, num_patches, channels)
+ # back to shape (batch_size, channels, orig_height, orig_width)
+ features = patches.contiguous().view(batch_size, patch_area, num_patches, -1)
+ features = features.transpose(1, 3)
+ features = features.reshape(
+ batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width
+ )
+ features = features.transpose(1, 2)
+ features = features.reshape(
+ batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width
+ )
+
+ if info_dict["interpolate"]:
+ features = nn.functional.interpolate(
+ features, size=info_dict["orig_size"], mode="bilinear", align_corners=False
+ )
+
+ return features
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ # reduce spatial dimensions if needed
+ if self.downsampling_layer:
+ features = self.downsampling_layer(features)
+
+ residual = features
+
+ # local representation
+ features = self.conv_kxk(features)
+ features = self.conv_1x1(features)
+
+ # convert feature map to patches
+ patches, info_dict = self.unfolding(features)
+
+ # learn global representations
+ patches = self.transformer(patches)
+ patches = self.layernorm(patches)
+
+ # convert patches back to feature maps
+ features = self.folding(patches, info_dict)
+
+ features = self.conv_projection(features)
+ features = self.fusion(torch.cat((residual, features), dim=1))
+ return features
+
+
+class MobileViTEncoder(nn.Module):
+ def __init__(self, config: MobileViTConfig) -> None:
+ super().__init__()
+ self.config = config
+
+ self.layer = nn.ModuleList()
+ self.gradient_checkpointing = False
+
+ # segmentation architectures like DeepLab and PSPNet modify the strides
+ # of the classification backbones
+ dilate_layer_4 = dilate_layer_5 = False
+ if config.output_stride == 8:
+ dilate_layer_4 = True
+ dilate_layer_5 = True
+ elif config.output_stride == 16:
+ dilate_layer_5 = True
+
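+        # e.g. with output_stride == 8, `dilation` is doubled before layer 4 and again before
+        # layer 5 below, so their downsampling blocks run with stride 1 and the overall
+        # spatial reduction stays at 8x instead of 32x.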
+ dilation = 1
+
+ layer_1 = MobileViTMobileNetLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[0],
+ out_channels=config.neck_hidden_sizes[1],
+ stride=1,
+ num_stages=1,
+ )
+ self.layer.append(layer_1)
+
+ layer_2 = MobileViTMobileNetLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[1],
+ out_channels=config.neck_hidden_sizes[2],
+ stride=2,
+ num_stages=3,
+ )
+ self.layer.append(layer_2)
+
+ layer_3 = MobileViTLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[2],
+ out_channels=config.neck_hidden_sizes[3],
+ stride=2,
+ hidden_size=config.hidden_sizes[0],
+ num_stages=2,
+ )
+ self.layer.append(layer_3)
+
+ if dilate_layer_4:
+ dilation *= 2
+
+ layer_4 = MobileViTLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[3],
+ out_channels=config.neck_hidden_sizes[4],
+ stride=2,
+ hidden_size=config.hidden_sizes[1],
+ num_stages=4,
+ dilation=dilation,
+ )
+ self.layer.append(layer_4)
+
+ if dilate_layer_5:
+ dilation *= 2
+
+ layer_5 = MobileViTLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[4],
+ out_channels=config.neck_hidden_sizes[5],
+ stride=2,
+ hidden_size=config.hidden_sizes[2],
+ num_stages=3,
+ dilation=dilation,
+ )
+ self.layer.append(layer_5)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutputWithNoAttention]:
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, layer_module in enumerate(self.layer):
+ if self.gradient_checkpointing and self.training:
+ hidden_states = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ )
+ else:
+ hidden_states = layer_module(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
+
+ return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
+
+
+class MobileViTPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MobileViTConfig
+ base_model_prefix = "mobilevit"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+MOBILEVIT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+    as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+    behavior.
+
+ Parameters:
+ config ([`MobileViTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MOBILEVIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`MobileViTImageProcessor.__call__`] for details.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare MobileViT model outputting raw hidden-states without any specific head on top.",
+ MOBILEVIT_START_DOCSTRING,
+)
+class MobileViTModel(MobileViTPreTrainedModel):
+ def __init__(self, config: MobileViTConfig, expand_output: bool = True):
+ super().__init__(config)
+ self.config = config
+ self.expand_output = expand_output
+
+ self.conv_stem = MobileViTConvLayer(
+ config,
+ in_channels=config.num_channels,
+ out_channels=config.neck_hidden_sizes[0],
+ kernel_size=3,
+ stride=2,
+ )
+
+ self.encoder = MobileViTEncoder(config)
+
+ if self.expand_output:
+ self.conv_1x1_exp = MobileViTConvLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[5],
+ out_channels=config.neck_hidden_sizes[6],
+ kernel_size=1,
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _prune_heads(self, heads_to_prune):
+ """Prunes heads of the model.
+        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class `PreTrainedModel`.
+ """
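+        # Hypothetical usage sketch (indices chosen purely for illustration):
+        #   model._prune_heads({2: [0, 1]})
+        # prunes attention heads 0 and 1 in every transformer layer inside `self.encoder.layer[2]`,
+        # provided that entry is a MobileViTLayer (purely convolutional stages are skipped).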
+ for layer_index, heads in heads_to_prune.items():
+ mobilevit_layer = self.encoder.layer[layer_index]
+ if isinstance(mobilevit_layer, MobileViTLayer):
+ for transformer_layer in mobilevit_layer.transformer.layer:
+ transformer_layer.attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ embedding_output = self.conv_stem(pixel_values)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if self.expand_output:
+ last_hidden_state = self.conv_1x1_exp(encoder_outputs[0])
+
+ # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
+ pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
+ else:
+ last_hidden_state = encoder_outputs[0]
+ pooled_output = None
+
+ if not return_dict:
+ output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
+ return output + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndNoAttention(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """
+ MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+ ImageNet.
+ """,
+ MOBILEVIT_START_DOCSTRING,
+)
+class MobileViTForImageClassification(MobileViTPreTrainedModel):
+ def __init__(self, config: MobileViTConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.mobilevit = MobileViTModel(config)
+
+ # Classifier head
+ self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
+ self.classifier = (
+ nn.Linear(config.neck_hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutputWithNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilevit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
+
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(self.dropout(pooled_output))
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
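+            # e.g. num_labels == 3 with integer (torch.long) labels selects
+            # "single_label_classification", while float labels of shape (batch_size, 3)
+            # select "multi_label_classification" (values chosen for illustration only).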
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutputWithNoAttention(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ )
+
+
+class MobileViTASPPPooling(nn.Module):
+ def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int) -> None:
+ super().__init__()
+
+ self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)
+
+ self.conv_1x1 = MobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ stride=1,
+ use_normalization=True,
+ use_activation="relu",
+ )
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ spatial_size = features.shape[-2:]
+ features = self.global_pool(features)
+ features = self.conv_1x1(features)
+ features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False)
+ return features
+
+
+class MobileViTASPP(nn.Module):
+ """
+ ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587
+ """
+
+ def __init__(self, config: MobileViTConfig) -> None:
+ super().__init__()
+
+ in_channels = config.neck_hidden_sizes[-2]
+ out_channels = config.aspp_out_channels
+
+ if len(config.atrous_rates) != 3:
+ raise ValueError("Expected 3 values for atrous_rates")
+
+ self.convs = nn.ModuleList()
+
+ in_projection = MobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ use_activation="relu",
+ )
+ self.convs.append(in_projection)
+
+ self.convs.extend(
+ [
+ MobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ dilation=rate,
+ use_activation="relu",
+ )
+ for rate in config.atrous_rates
+ ]
+ )
+
+ pool_layer = MobileViTASPPPooling(config, in_channels, out_channels)
+ self.convs.append(pool_layer)
+
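+        # The five parallel branches (the 1x1 input projection, the three atrous 3x3
+        # convolutions and the global-pooling branch) are concatenated along the channel
+        # dimension in `forward`, hence the 5 * out_channels input width of the projection below.
+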
+ self.project = MobileViTConvLayer(
+ config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu"
+ )
+
+ self.dropout = nn.Dropout(p=config.aspp_dropout_prob)
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ pyramid = []
+ for conv in self.convs:
+ pyramid.append(conv(features))
+ pyramid = torch.cat(pyramid, dim=1)
+
+ pooled_features = self.project(pyramid)
+ pooled_features = self.dropout(pooled_features)
+ return pooled_features
+
+
+class MobileViTDeepLabV3(nn.Module):
+ """
+ DeepLabv3 architecture: https://arxiv.org/abs/1706.05587
+ """
+
+ def __init__(self, config: MobileViTConfig) -> None:
+ super().__init__()
+ self.aspp = MobileViTASPP(config)
+
+ self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
+
+ self.classifier = MobileViTConvLayer(
+ config,
+ in_channels=config.aspp_out_channels,
+ out_channels=config.num_labels,
+ kernel_size=1,
+ use_normalization=False,
+ use_activation=False,
+ bias=True,
+ )
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ features = self.aspp(hidden_states[-1])
+ features = self.dropout(features)
+ features = self.classifier(features)
+ return features
+
+
+@add_start_docstrings(
+ """
+ MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC.
+ """,
+ MOBILEVIT_START_DOCSTRING,
+)
+class MobileViTForSemanticSegmentation(MobileViTPreTrainedModel):
+ def __init__(self, config: MobileViTConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.mobilevit = MobileViTModel(config, expand_output=False)
+ self.segmentation_head = MobileViTDeepLabV3(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, SemanticSegmenterOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
+ Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import requests
+ >>> import torch
+ >>> from PIL import Image
+ >>> from transformers import AutoImageProcessor, MobileViTForSemanticSegmentation
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
+ >>> model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**inputs)
+
+ >>> # logits are of shape (batch_size, num_labels, height, width)
+ >>> logits = outputs.logits
+ ```"""
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilevit(
+ pixel_values,
+ output_hidden_states=True, # we need the intermediate hidden states
+ return_dict=return_dict,
+ )
+
+ encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ logits = self.segmentation_head(encoder_hidden_states)
+
+ loss = None
+ if labels is not None:
+ if self.config.num_labels == 1:
+ raise ValueError("The number of labels should be greater than one")
+ else:
+ # upsample logits to the images' original size
+ upsampled_logits = nn.functional.interpolate(
+ logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
+ )
+ loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
+ loss = loss_fct(upsampled_logits, labels)
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (logits,) + outputs[1:]
+ else:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SemanticSegmenterOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=None,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_tf_mobilevit.py b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_tf_mobilevit.py
new file mode 100644
index 0000000000000000000000000000000000000000..8434c9685e570f31dcf8b0881cebe4c3304af2e5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevit/modeling_tf_mobilevit.py
@@ -0,0 +1,1373 @@
+# coding=utf-8
+# Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE
+""" TensorFlow 2.0 MobileViT model."""
+
+from __future__ import annotations
+
+from typing import Dict, Optional, Tuple, Union
+
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...file_utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ replace_return_docstrings,
+)
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPooling,
+ TFImageClassifierOutputWithNoAttention,
+ TFSemanticSegmenterOutputWithNoAttention,
+)
+from ...modeling_tf_utils import (
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import shape_list, stable_softmax
+from ...utils import logging
+from .configuration_mobilevit import MobileViTConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "MobileViTConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "apple/mobilevit-small"
+_EXPECTED_OUTPUT_SHAPE = [1, 640, 8, 8]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "apple/mobilevit-small"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+
+from ..deprecated._archive_maps import TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
+ """
+ Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the
+ original TensorFlow repo. It can be seen here:
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+ """
+ if min_value is None:
+ min_value = divisor
+ new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
+ # Make sure that round down does not go down by more than 10%.
+ if new_value < 0.9 * value:
+ new_value += divisor
+ return int(new_value)
+
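+# Illustrative values, assuming the default divisor of 8:
+#   make_divisible(67) -> 64  (rounded to the nearest multiple of 8; 64 >= 0.9 * 67)
+#   make_divisible(27) -> 32  (rounding to 24 would undershoot by more than 10%, so one divisor is added back)
+#   make_divisible(3)  -> 8   (clamped up to min_value, which defaults to the divisor)
+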
+
+class TFMobileViTConvLayer(keras.layers.Layer):
+ def __init__(
+ self,
+ config: MobileViTConfig,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: int,
+ stride: int = 1,
+ groups: int = 1,
+ bias: bool = False,
+ dilation: int = 1,
+ use_normalization: bool = True,
+ use_activation: Union[bool, str] = True,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ logger.warning(
+ f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish "
+ "to train/fine-tune this model, you need a GPU or a TPU"
+ )
+
+ padding = int((kernel_size - 1) / 2) * dilation
+ self.padding = keras.layers.ZeroPadding2D(padding)
+
+ if out_channels % groups != 0:
+ raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
+
+ self.convolution = keras.layers.Conv2D(
+ filters=out_channels,
+ kernel_size=kernel_size,
+ strides=stride,
+ padding="VALID",
+ dilation_rate=dilation,
+ groups=groups,
+ use_bias=bias,
+ name="convolution",
+ )
+
+ if use_normalization:
+ self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.1, name="normalization")
+ else:
+ self.normalization = None
+
+ if use_activation:
+ if isinstance(use_activation, str):
+ self.activation = get_tf_activation(use_activation)
+ elif isinstance(config.hidden_act, str):
+ self.activation = get_tf_activation(config.hidden_act)
+ else:
+ self.activation = config.hidden_act
+ else:
+ self.activation = None
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
+ padded_features = self.padding(features)
+ features = self.convolution(padded_features)
+ if self.normalization is not None:
+ features = self.normalization(features, training=training)
+ if self.activation is not None:
+ features = self.activation(features)
+ return features
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "convolution", None) is not None:
+ with tf.name_scope(self.convolution.name):
+ self.convolution.build([None, None, None, self.in_channels])
+ if getattr(self, "normalization", None) is not None:
+ if hasattr(self.normalization, "name"):
+ with tf.name_scope(self.normalization.name):
+ self.normalization.build([None, None, None, self.out_channels])
+
+
+class TFMobileViTInvertedResidual(keras.layers.Layer):
+ """
+ Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381
+ """
+
+ def __init__(
+ self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int = 1, **kwargs
+ ) -> None:
+ super().__init__(**kwargs)
+ expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
+
+ if stride not in [1, 2]:
+ raise ValueError(f"Invalid stride {stride}.")
+
+ self.use_residual = (stride == 1) and (in_channels == out_channels)
+
+ self.expand_1x1 = TFMobileViTConvLayer(
+ config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1, name="expand_1x1"
+ )
+
+ self.conv_3x3 = TFMobileViTConvLayer(
+ config,
+ in_channels=expanded_channels,
+ out_channels=expanded_channels,
+ kernel_size=3,
+ stride=stride,
+ groups=expanded_channels,
+ dilation=dilation,
+ name="conv_3x3",
+ )
+
+ self.reduce_1x1 = TFMobileViTConvLayer(
+ config,
+ in_channels=expanded_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ use_activation=False,
+ name="reduce_1x1",
+ )
+
+ def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
+ residual = features
+
+ features = self.expand_1x1(features, training=training)
+ features = self.conv_3x3(features, training=training)
+ features = self.reduce_1x1(features, training=training)
+
+ return residual + features if self.use_residual else features
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "expand_1x1", None) is not None:
+ with tf.name_scope(self.expand_1x1.name):
+ self.expand_1x1.build(None)
+ if getattr(self, "conv_3x3", None) is not None:
+ with tf.name_scope(self.conv_3x3.name):
+ self.conv_3x3.build(None)
+ if getattr(self, "reduce_1x1", None) is not None:
+ with tf.name_scope(self.reduce_1x1.name):
+ self.reduce_1x1.build(None)
+
+
+class TFMobileViTMobileNetLayer(keras.layers.Layer):
+ def __init__(
+ self,
+ config: MobileViTConfig,
+ in_channels: int,
+ out_channels: int,
+ stride: int = 1,
+ num_stages: int = 1,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+
+ self.layers = []
+ for i in range(num_stages):
+ layer = TFMobileViTInvertedResidual(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ stride=stride if i == 0 else 1,
+ name=f"layer.{i}",
+ )
+ self.layers.append(layer)
+ in_channels = out_channels
+
+ def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
+ for layer_module in self.layers:
+ features = layer_module(features, training=training)
+ return features
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layers", None) is not None:
+ for layer_module in self.layers:
+ with tf.name_scope(layer_module.name):
+ layer_module.build(None)
+
+
+class TFMobileViTSelfAttention(keras.layers.Layer):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+
+ if hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+                f"The hidden size {hidden_size} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ scale = tf.cast(self.attention_head_size, dtype=tf.float32)
+ self.scale = tf.math.sqrt(scale)
+
+ self.query = keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="query")
+ self.key = keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="key")
+ self.value = keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="value")
+
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
+ self.hidden_size = hidden_size
+
+ def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor:
+ batch_size = tf.shape(x)[0]
+ x = tf.reshape(x, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
+ return tf.transpose(x, perm=[0, 2, 1, 3])
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ batch_size = tf.shape(hidden_states)[0]
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+ attention_scores = attention_scores / self.scale
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs, training=training)
+
+ context_layer = tf.matmul(attention_probs, value_layer)
+
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
+ context_layer = tf.reshape(context_layer, shape=(batch_size, -1, self.all_head_size))
+ return context_layer
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.hidden_size])
+
+
+class TFMobileViTSelfOutput(keras.layers.Layer):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(hidden_size, name="dense")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.hidden_size = hidden_size
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.hidden_size])
+
+
+class TFMobileViTAttention(keras.layers.Layer):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.attention = TFMobileViTSelfAttention(config, hidden_size, name="attention")
+ self.dense_output = TFMobileViTSelfOutput(config, hidden_size, name="output")
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ self_outputs = self.attention(hidden_states, training=training)
+ attention_output = self.dense_output(self_outputs, training=training)
+ return attention_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "dense_output", None) is not None:
+ with tf.name_scope(self.dense_output.name):
+ self.dense_output.build(None)
+
+
+class TFMobileViTIntermediate(keras.layers.Layer):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(intermediate_size, name="dense")
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.hidden_size = hidden_size
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.hidden_size])
+
+
+class TFMobileViTOutput(keras.layers.Layer):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(hidden_size, name="dense")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.intermediate_size = intermediate_size
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = hidden_states + input_tensor
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.intermediate_size])
+
+
+class TFMobileViTTransformerLayer(keras.layers.Layer):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.attention = TFMobileViTAttention(config, hidden_size, name="attention")
+ self.intermediate = TFMobileViTIntermediate(config, hidden_size, intermediate_size, name="intermediate")
+ self.mobilevit_output = TFMobileViTOutput(config, hidden_size, intermediate_size, name="output")
+ self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before")
+ self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after")
+ self.hidden_size = hidden_size
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ attention_output = self.attention(self.layernorm_before(hidden_states), training=training)
+ hidden_states = attention_output + hidden_states
+
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+ layer_output = self.mobilevit_output(layer_output, hidden_states, training=training)
+ return layer_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "mobilevit_output", None) is not None:
+ with tf.name_scope(self.mobilevit_output.name):
+ self.mobilevit_output.build(None)
+ if getattr(self, "layernorm_before", None) is not None:
+ with tf.name_scope(self.layernorm_before.name):
+ self.layernorm_before.build([None, None, self.hidden_size])
+ if getattr(self, "layernorm_after", None) is not None:
+ with tf.name_scope(self.layernorm_after.name):
+ self.layernorm_after.build([None, None, self.hidden_size])
+
+
+class TFMobileViTTransformer(keras.layers.Layer):
+ def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+
+ self.layers = []
+ for i in range(num_stages):
+ transformer_layer = TFMobileViTTransformerLayer(
+ config,
+ hidden_size=hidden_size,
+ intermediate_size=int(hidden_size * config.mlp_ratio),
+ name=f"layer.{i}",
+ )
+ self.layers.append(transformer_layer)
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ for layer_module in self.layers:
+ hidden_states = layer_module(hidden_states, training=training)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layers", None) is not None:
+ for layer_module in self.layers:
+ with tf.name_scope(layer_module.name):
+ layer_module.build(None)
+
+
+class TFMobileViTLayer(keras.layers.Layer):
+ """
+ MobileViT block: https://arxiv.org/abs/2110.02178
+ """
+
+ def __init__(
+ self,
+ config: MobileViTConfig,
+ in_channels: int,
+ out_channels: int,
+ stride: int,
+ hidden_size: int,
+ num_stages: int,
+ dilation: int = 1,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ self.patch_width = config.patch_size
+ self.patch_height = config.patch_size
+
+ if stride == 2:
+ self.downsampling_layer = TFMobileViTInvertedResidual(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ stride=stride if dilation == 1 else 1,
+ dilation=dilation // 2 if dilation > 1 else 1,
+ name="downsampling_layer",
+ )
+ in_channels = out_channels
+ else:
+ self.downsampling_layer = None
+
+ self.conv_kxk = TFMobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=in_channels,
+ kernel_size=config.conv_kernel_size,
+ name="conv_kxk",
+ )
+
+ self.conv_1x1 = TFMobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=hidden_size,
+ kernel_size=1,
+ use_normalization=False,
+ use_activation=False,
+ name="conv_1x1",
+ )
+
+ self.transformer = TFMobileViTTransformer(
+ config, hidden_size=hidden_size, num_stages=num_stages, name="transformer"
+ )
+
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+
+ self.conv_projection = TFMobileViTConvLayer(
+ config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1, name="conv_projection"
+ )
+
+ self.fusion = TFMobileViTConvLayer(
+ config,
+ in_channels=2 * in_channels,
+ out_channels=in_channels,
+ kernel_size=config.conv_kernel_size,
+ name="fusion",
+ )
+ self.hidden_size = hidden_size
+
+ def unfolding(self, features: tf.Tensor) -> Tuple[tf.Tensor, Dict]:
+ patch_width, patch_height = self.patch_width, self.patch_height
+ patch_area = tf.cast(patch_width * patch_height, "int32")
+
+ batch_size = tf.shape(features)[0]
+ orig_height = tf.shape(features)[1]
+ orig_width = tf.shape(features)[2]
+ channels = tf.shape(features)[3]
+
+ new_height = tf.cast(tf.math.ceil(orig_height / patch_height) * patch_height, "int32")
+ new_width = tf.cast(tf.math.ceil(orig_width / patch_width) * patch_width, "int32")
+
+ interpolate = new_width != orig_width or new_height != orig_height
+ if interpolate:
+            # Note: Padding can be done, but then it needs to be handled in the attention function.
+ features = tf.image.resize(features, size=(new_height, new_width), method="bilinear")
+
+ # number of patches along width and height
+ num_patch_width = new_width // patch_width
+ num_patch_height = new_height // patch_height
+ num_patches = num_patch_height * num_patch_width
+
+ # convert from shape (batch_size, orig_height, orig_width, channels)
+ # to the shape (batch_size * patch_area, num_patches, channels)
+ features = tf.transpose(features, [0, 3, 1, 2])
+ patches = tf.reshape(
+ features, (batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width)
+ )
+ patches = tf.transpose(patches, [0, 2, 1, 3])
+ patches = tf.reshape(patches, (batch_size, channels, num_patches, patch_area))
+ patches = tf.transpose(patches, [0, 3, 2, 1])
+ patches = tf.reshape(patches, (batch_size * patch_area, num_patches, channels))
+
+ info_dict = {
+ "orig_size": (orig_height, orig_width),
+ "batch_size": batch_size,
+ "channels": channels,
+ "interpolate": interpolate,
+ "num_patches": num_patches,
+ "num_patches_width": num_patch_width,
+ "num_patches_height": num_patch_height,
+ }
+ return patches, info_dict
+
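+    # The incoming features here are NHWC; the transpose above moves channels first so the
+    # resulting patches have the same (batch_size * patch_area, num_patches, channels) layout
+    # as in the PyTorch implementation, and `folding` below transposes back to NHWC at the end.
+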
+ def folding(self, patches: tf.Tensor, info_dict: Dict) -> tf.Tensor:
+ patch_width, patch_height = self.patch_width, self.patch_height
+ patch_area = int(patch_width * patch_height)
+
+ batch_size = info_dict["batch_size"]
+ channels = info_dict["channels"]
+ num_patches = info_dict["num_patches"]
+ num_patch_height = info_dict["num_patches_height"]
+ num_patch_width = info_dict["num_patches_width"]
+
+ # convert from shape (batch_size * patch_area, num_patches, channels)
+ # back to shape (batch_size, channels, orig_height, orig_width)
+ features = tf.reshape(patches, (batch_size, patch_area, num_patches, -1))
+ features = tf.transpose(features, perm=(0, 3, 2, 1))
+ features = tf.reshape(
+ features, (batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width)
+ )
+ features = tf.transpose(features, perm=(0, 2, 1, 3))
+ features = tf.reshape(
+ features, (batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width)
+ )
+ features = tf.transpose(features, perm=(0, 2, 3, 1))
+
+ if info_dict["interpolate"]:
+ features = tf.image.resize(features, size=info_dict["orig_size"], method="bilinear")
+
+ return features
+
+ def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
+ # reduce spatial dimensions if needed
+ if self.downsampling_layer:
+ features = self.downsampling_layer(features, training=training)
+
+ residual = features
+
+ # local representation
+ features = self.conv_kxk(features, training=training)
+ features = self.conv_1x1(features, training=training)
+
+ # convert feature map to patches
+ patches, info_dict = self.unfolding(features)
+
+ # learn global representations
+ patches = self.transformer(patches, training=training)
+ patches = self.layernorm(patches)
+
+ # convert patches back to feature maps
+ features = self.folding(patches, info_dict)
+
+ features = self.conv_projection(features, training=training)
+ features = self.fusion(tf.concat([residual, features], axis=-1), training=training)
+ return features
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv_kxk", None) is not None:
+ with tf.name_scope(self.conv_kxk.name):
+ self.conv_kxk.build(None)
+ if getattr(self, "conv_1x1", None) is not None:
+ with tf.name_scope(self.conv_1x1.name):
+ self.conv_1x1.build(None)
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, self.hidden_size])
+ if getattr(self, "conv_projection", None) is not None:
+ with tf.name_scope(self.conv_projection.name):
+ self.conv_projection.build(None)
+ if getattr(self, "fusion", None) is not None:
+ with tf.name_scope(self.fusion.name):
+ self.fusion.build(None)
+ if getattr(self, "downsampling_layer", None) is not None:
+ with tf.name_scope(self.downsampling_layer.name):
+ self.downsampling_layer.build(None)
+
+
+class TFMobileViTEncoder(keras.layers.Layer):
+ def __init__(self, config: MobileViTConfig, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.config = config
+
+ self.layers = []
+
+ # segmentation architectures like DeepLab and PSPNet modify the strides
+ # of the classification backbones
+ dilate_layer_4 = dilate_layer_5 = False
+ if config.output_stride == 8:
+ dilate_layer_4 = True
+ dilate_layer_5 = True
+ elif config.output_stride == 16:
+ dilate_layer_5 = True
+
+ dilation = 1
+
+ layer_1 = TFMobileViTMobileNetLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[0],
+ out_channels=config.neck_hidden_sizes[1],
+ stride=1,
+ num_stages=1,
+ name="layer.0",
+ )
+ self.layers.append(layer_1)
+
+ layer_2 = TFMobileViTMobileNetLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[1],
+ out_channels=config.neck_hidden_sizes[2],
+ stride=2,
+ num_stages=3,
+ name="layer.1",
+ )
+ self.layers.append(layer_2)
+
+ layer_3 = TFMobileViTLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[2],
+ out_channels=config.neck_hidden_sizes[3],
+ stride=2,
+ hidden_size=config.hidden_sizes[0],
+ num_stages=2,
+ name="layer.2",
+ )
+ self.layers.append(layer_3)
+
+ if dilate_layer_4:
+ dilation *= 2
+
+ layer_4 = TFMobileViTLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[3],
+ out_channels=config.neck_hidden_sizes[4],
+ stride=2,
+ hidden_size=config.hidden_sizes[1],
+ num_stages=4,
+ dilation=dilation,
+ name="layer.3",
+ )
+ self.layers.append(layer_4)
+
+ if dilate_layer_5:
+ dilation *= 2
+
+ layer_5 = TFMobileViTLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[4],
+ out_channels=config.neck_hidden_sizes[5],
+ stride=2,
+ hidden_size=config.hidden_sizes[2],
+ num_stages=3,
+ dilation=dilation,
+ name="layer.4",
+ )
+ self.layers.append(layer_5)
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ training: bool = False,
+ ) -> Union[tuple, TFBaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, layer_module in enumerate(self.layers):
+ hidden_states = layer_module(hidden_states, training=training)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
+
+ return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layers", None) is not None:
+ for layer_module in self.layers:
+ with tf.name_scope(layer_module.name):
+ layer_module.build(None)
+
+
+@keras_serializable
+class TFMobileViTMainLayer(keras.layers.Layer):
+ config_class = MobileViTConfig
+
+ def __init__(self, config: MobileViTConfig, expand_output: bool = True, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.expand_output = expand_output
+
+ self.conv_stem = TFMobileViTConvLayer(
+ config,
+ in_channels=config.num_channels,
+ out_channels=config.neck_hidden_sizes[0],
+ kernel_size=3,
+ stride=2,
+ name="conv_stem",
+ )
+
+ self.encoder = TFMobileViTEncoder(config, name="encoder")
+
+ if self.expand_output:
+ self.conv_1x1_exp = TFMobileViTConvLayer(
+ config,
+ in_channels=config.neck_hidden_sizes[5],
+ out_channels=config.neck_hidden_sizes[6],
+ kernel_size=1,
+ name="conv_1x1_exp",
+ )
+
+ self.pooler = keras.layers.GlobalAveragePooling2D(data_format="channels_first", name="pooler")
+
+ def _prune_heads(self, heads_to_prune):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See
+        base class `PreTrainedModel`.
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPooling]:
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
+ # So change the input format from `NCHW` to `NHWC`.
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
+
+ embedding_output = self.conv_stem(pixel_values, training=training)
+
+ encoder_outputs = self.encoder(
+ embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
+ )
+
+ if self.expand_output:
+ last_hidden_state = self.conv_1x1_exp(encoder_outputs[0])
+
+ # Change to NCHW output format to have uniformity in the modules
+ last_hidden_state = tf.transpose(last_hidden_state, perm=[0, 3, 1, 2])
+
+ # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
+ pooled_output = self.pooler(last_hidden_state)
+ else:
+ last_hidden_state = encoder_outputs[0]
+ # Change to NCHW output format to have uniformity in the modules
+ last_hidden_state = tf.transpose(last_hidden_state, perm=[0, 3, 1, 2])
+ pooled_output = None
+
+ if not return_dict:
+ output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
+
+ # Change to NCHW output format to have uniformity in the modules
+ if not self.expand_output:
+ remaining_encoder_outputs = encoder_outputs[1:]
+ remaining_encoder_outputs = tuple(
+ [tf.transpose(h, perm=(0, 3, 1, 2)) for h in remaining_encoder_outputs[0]]
+ )
+ remaining_encoder_outputs = (remaining_encoder_outputs,)
+ return output + remaining_encoder_outputs
+ else:
+ return output + encoder_outputs[1:]
+
+ # Change the other hidden state outputs to NCHW as well
+ if output_hidden_states:
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv_stem", None) is not None:
+ with tf.name_scope(self.conv_stem.name):
+ self.conv_stem.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build([None, None, None, None])
+ if getattr(self, "conv_1x1_exp", None) is not None:
+ with tf.name_scope(self.conv_1x1_exp.name):
+ self.conv_1x1_exp.build(None)
+
+
+class TFMobileViTPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MobileViTConfig
+ base_model_prefix = "mobilevit"
+ main_input_name = "pixel_values"
+
+
+MOBILEVIT_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+    behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`MobileViTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MOBILEVIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`MobileViTImageProcessor.__call__`] for details.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can only be used in eager mode; in graph mode the value in the config will be used
+ instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode; in graph mode the value will always be set to `True`.
+"""
+
+
+@add_start_docstrings(
+ "The bare MobileViT model outputting raw hidden-states without any specific head on top.",
+ MOBILEVIT_START_DOCSTRING,
+)
+class TFMobileViTModel(TFMobileViTPreTrainedModel):
+ def __init__(self, config: MobileViTConfig, expand_output: bool = True, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.config = config
+ self.expand_output = expand_output
+
+ self.mobilevit = TFMobileViTMainLayer(config, expand_output=expand_output, name="mobilevit")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPooling]:
+ output = self.mobilevit(pixel_values, output_hidden_states, return_dict, training=training)
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilevit", None) is not None:
+ with tf.name_scope(self.mobilevit.name):
+ self.mobilevit.build(None)
+
+
+@add_start_docstrings(
+ """
+ MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+ ImageNet.
+ """,
+ MOBILEVIT_START_DOCSTRING,
+)
+class TFMobileViTForImageClassification(TFMobileViTPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: MobileViTConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.mobilevit = TFMobileViTMainLayer(config, name="mobilevit")
+
+ # Classifier head
+ self.dropout = keras.layers.Dropout(config.classifier_dropout_prob)
+ self.classifier = (
+ keras.layers.Dense(config.num_labels, name="classifier") if config.num_labels > 0 else tf.identity
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=TFImageClassifierOutputWithNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[tuple, TFImageClassifierOutputWithNoAttention]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
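+
+ For example, a minimal sketch with a randomly initialized model and random inputs (a real checkpoint would
+ instead be loaded with [`~TFPreTrainedModel.from_pretrained`]):
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import MobileViTConfig, TFMobileViTForImageClassification
+
+ >>> model = TFMobileViTForImageClassification(MobileViTConfig(num_labels=10))
+ >>> pixel_values = tf.random.uniform((1, 3, 256, 256))
+ >>> labels = tf.constant([3])
+
+ >>> outputs = model(pixel_values, labels=labels)
+ >>> loss, logits = outputs.loss, outputs.logits
+ ```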
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilevit(
+ pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
+ )
+
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(self.dropout(pooled_output, training=training))
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilevit", None) is not None:
+ with tf.name_scope(self.mobilevit.name):
+ self.mobilevit.build(None)
+ if getattr(self, "classifier", None) is not None:
+ if hasattr(self.classifier, "name"):
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.neck_hidden_sizes[-1]])
+
+
+class TFMobileViTASPPPooling(keras.layers.Layer):
+ def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+
+ self.global_pool = keras.layers.GlobalAveragePooling2D(keepdims=True, name="global_pool")
+
+ self.conv_1x1 = TFMobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ stride=1,
+ use_normalization=True,
+ use_activation="relu",
+ name="conv_1x1",
+ )
+
+ def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
+ spatial_size = shape_list(features)[1:-1]
+ features = self.global_pool(features)
+ features = self.conv_1x1(features, training=training)
+ features = tf.image.resize(features, size=spatial_size, method="bilinear")
+ return features
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "global_pool", None) is not None:
+ with tf.name_scope(self.global_pool.name):
+ self.global_pool.build([None, None, None, None])
+ if getattr(self, "conv_1x1", None) is not None:
+ with tf.name_scope(self.conv_1x1.name):
+ self.conv_1x1.build(None)
+
+
+class TFMobileViTASPP(keras.layers.Layer):
+ """
+ ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587
+ """
+
+ def __init__(self, config: MobileViTConfig, **kwargs) -> None:
+ super().__init__(**kwargs)
+
+ in_channels = config.neck_hidden_sizes[-2]
+ out_channels = config.aspp_out_channels
+
+ if len(config.atrous_rates) != 3:
+ raise ValueError("Expected 3 values for atrous_rates")
+
+ self.convs = []
+
+ in_projection = TFMobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ use_activation="relu",
+ name="convs.0",
+ )
+ self.convs.append(in_projection)
+
+ self.convs.extend(
+ [
+ TFMobileViTConvLayer(
+ config,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ dilation=rate,
+ use_activation="relu",
+ name=f"convs.{i + 1}",
+ )
+ for i, rate in enumerate(config.atrous_rates)
+ ]
+ )
+
+ pool_layer = TFMobileViTASPPPooling(
+ config, in_channels, out_channels, name=f"convs.{len(config.atrous_rates) + 1}"
+ )
+ self.convs.append(pool_layer)
+
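+ # the five branch outputs (the 1x1 in-projection, the three atrous convolutions and the global pooling
+ # branch) are concatenated along the channel axis, hence the `5 * out_channels` input channels below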
+ self.project = TFMobileViTConvLayer(
+ config,
+ in_channels=5 * out_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ use_activation="relu",
+ name="project",
+ )
+
+ self.dropout = keras.layers.Dropout(config.aspp_dropout_prob)
+
+ def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor:
+ # since the hidden states were transposed to have `(batch_size, channels, height, width)`
+ # layout, we transpose them back to have `(batch_size, height, width, channels)` layout.
+ features = tf.transpose(features, perm=[0, 2, 3, 1])
+ pyramid = []
+ for conv in self.convs:
+ pyramid.append(conv(features, training=training))
+ pyramid = tf.concat(pyramid, axis=-1)
+
+ pooled_features = self.project(pyramid, training=training)
+ pooled_features = self.dropout(pooled_features, training=training)
+ return pooled_features
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "project", None) is not None:
+ with tf.name_scope(self.project.name):
+ self.project.build(None)
+ if getattr(self, "convs", None) is not None:
+ for conv in self.convs:
+ with tf.name_scope(conv.name):
+ conv.build(None)
+
+
+class TFMobileViTDeepLabV3(keras.layers.Layer):
+ """
+ DeepLabv3 architecture: https://arxiv.org/abs/1706.05587
+ """
+
+ def __init__(self, config: MobileViTConfig, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.aspp = TFMobileViTASPP(config, name="aspp")
+
+ self.dropout = keras.layers.Dropout(config.classifier_dropout_prob)
+
+ self.classifier = TFMobileViTConvLayer(
+ config,
+ in_channels=config.aspp_out_channels,
+ out_channels=config.num_labels,
+ kernel_size=1,
+ use_normalization=False,
+ use_activation=False,
+ bias=True,
+ name="classifier",
+ )
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ features = self.aspp(hidden_states[-1], training=training)
+ features = self.dropout(features, training=training)
+ features = self.classifier(features, training=training)
+ return features
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "aspp", None) is not None:
+ with tf.name_scope(self.aspp.name):
+ self.aspp.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build(None)
+
+
+@add_start_docstrings(
+ """
+ MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC.
+ """,
+ MOBILEVIT_START_DOCSTRING,
+)
+class TFMobileViTForSemanticSegmentation(TFMobileViTPreTrainedModel):
+ def __init__(self, config: MobileViTConfig, **kwargs) -> None:
+ super().__init__(config, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.mobilevit = TFMobileViTMainLayer(config, expand_output=False, name="mobilevit")
+ self.segmentation_head = TFMobileViTDeepLabV3(config, name="segmentation_head")
+
+ def hf_compute_loss(self, logits, labels):
+ # upsample logits to the images' original size
+ # `labels` is of shape (batch_size, height, width)
+ label_interp_shape = shape_list(labels)[1:]
+
+ upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear")
+ # compute weighted loss
+ loss_fct = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none")
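+ # `reduction="none"` keeps the per-pixel losses so that pixels equal to `semantic_loss_ignore_index`
+ # can be masked out before averaging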
+
+ def masked_loss(real, pred):
+ unmasked_loss = loss_fct(real, pred)
+ mask = tf.cast(real != self.config.semantic_loss_ignore_index, dtype=unmasked_loss.dtype)
+ masked_loss = unmasked_loss * mask
+ # Reduction strategy in a similar spirit to
+ # https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_utils.py#L210
+ reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(mask)
+ return tf.reshape(reduced_masked_loss, (1,))
+
+ return masked_loss(labels, upsampled_logits)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFSemanticSegmenterOutputWithNoAttention, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ labels: tf.Tensor | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[tuple, TFSemanticSegmenterOutputWithNoAttention]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*):
+ Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, TFMobileViTForSemanticSegmentation
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
+ >>> model = TFMobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
+
+ >>> inputs = image_processor(images=image, return_tensors="tf")
+
+ >>> outputs = model(**inputs)
+
+ >>> # logits are of shape (batch_size, num_labels, height, width)
+ >>> logits = outputs.logits
+ ```"""
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilevit(
+ pixel_values,
+ output_hidden_states=True, # we need the intermediate hidden states
+ return_dict=return_dict,
+ training=training,
+ )
+
+ encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ logits = self.segmentation_head(encoder_hidden_states, training=training)
+
+ loss = None
+ if labels is not None:
+ if not self.config.num_labels > 1:
+ raise ValueError("The number of labels should be greater than one")
+ else:
+ loss = self.hf_compute_loss(logits=logits, labels=labels)
+
+ # make logits of shape (batch_size, num_labels, height, width) to
+ # keep them consistent across APIs
+ logits = tf.transpose(logits, perm=[0, 3, 1, 2])
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (logits,) + outputs[1:]
+ else:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSemanticSegmenterOutputWithNoAttention(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilevit", None) is not None:
+ with tf.name_scope(self.mobilevit.name):
+ self.mobilevit.build(None)
+ if getattr(self, "segmentation_head", None) is not None:
+ with tf.name_scope(self.segmentation_head.name):
+ self.segmentation_head.build(None)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nat/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/nat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..19ddb46e8266fa85d25a3d085f2de33bf1dd4603
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/nat/__init__.py
@@ -0,0 +1,56 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {"configuration_nat": ["NAT_PRETRAINED_CONFIG_ARCHIVE_MAP", "NatConfig"]}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_nat"] = [
+ "NAT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "NatForImageClassification",
+ "NatModel",
+ "NatPreTrainedModel",
+ "NatBackbone",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_nat import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP, NatConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_nat import (
+ NAT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ NatBackbone,
+ NatForImageClassification,
+ NatModel,
+ NatPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nat/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nat/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e25a08fc8773f88ab2824806ee6eb445816b4552
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nat/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nat/__pycache__/configuration_nat.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nat/__pycache__/configuration_nat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f981f8c80ce4b90ea84980d37c20beb36ba96387
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nat/__pycache__/configuration_nat.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nat/__pycache__/modeling_nat.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nat/__pycache__/modeling_nat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d9856d32f21ba515330878ec249c21c32ffb39f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nat/__pycache__/modeling_nat.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nat/configuration_nat.py b/venv/lib/python3.10/site-packages/transformers/models/nat/configuration_nat.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb3b85a80c263b063e59716b62ff87d82c7d7496
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/nat/configuration_nat.py
@@ -0,0 +1,148 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Neighborhood Attention Transformer model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import NAT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class NatConfig(BackboneConfigMixin, PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`NatModel`]. It is used to instantiate a Nat model
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the Nat
+ [shi-labs/nat-mini-in1k-224](https://huggingface.co/shi-labs/nat-mini-in1k-224) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ patch_size (`int`, *optional*, defaults to 4):
+ The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ embed_dim (`int`, *optional*, defaults to 64):
+ Dimensionality of patch embedding.
+ depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 5]`):
+ Number of layers in each level of the encoder.
+ num_heads (`List[int]`, *optional*, defaults to `[2, 4, 8, 16]`):
+ Number of attention heads in each layer of the Transformer encoder.
+ kernel_size (`int`, *optional*, defaults to 7):
+ Neighborhood Attention kernel size.
+ mlp_ratio (`float`, *optional*, defaults to 3.0):
+ Ratio of MLP hidden dimensionality to embedding dimensionality.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether or not a learnable bias should be added to the queries, keys and values.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings and encoder.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
+ Stochastic depth rate.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
+ `"selu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ layer_scale_init_value (`float`, *optional*, defaults to 0.0):
+ The initial value for the layer scale. Disabled if <=0.
+ out_features (`List[str]`, *optional*):
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ out_indices (`List[int]`, *optional*):
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+
+ Example:
+
+ ```python
+ >>> from transformers import NatConfig, NatModel
+
+ >>> # Initializing a Nat shi-labs/nat-mini-in1k-224 style configuration
+ >>> configuration = NatConfig()
+
+ >>> # Initializing a model (with random weights) from the shi-labs/nat-mini-in1k-224 style configuration
+ >>> model = NatModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "nat"
+
+ attribute_map = {
+ "num_attention_heads": "num_heads",
+ "num_hidden_layers": "num_layers",
+ }
+
+ def __init__(
+ self,
+ patch_size=4,
+ num_channels=3,
+ embed_dim=64,
+ depths=[3, 4, 6, 5],
+ num_heads=[2, 4, 8, 16],
+ kernel_size=7,
+ mlp_ratio=3.0,
+ qkv_bias=True,
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ drop_path_rate=0.1,
+ hidden_act="gelu",
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ layer_scale_init_value=0.0,
+ out_features=None,
+ out_indices=None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.embed_dim = embed_dim
+ self.depths = depths
+ self.num_layers = len(depths)
+ self.num_heads = num_heads
+ self.kernel_size = kernel_size
+ self.mlp_ratio = mlp_ratio
+ self.qkv_bias = qkv_bias
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.drop_path_rate = drop_path_rate
+ self.hidden_act = hidden_act
+ self.layer_norm_eps = layer_norm_eps
+ self.initializer_range = initializer_range
+ # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
+ # this indicates the channel dimension after the last stage of the model
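+ # e.g. with the default embed_dim=64 and depths=[3, 4, 6, 5] this gives 64 * 2**3 = 512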
+ self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
+ self.layer_scale_init_value = layer_scale_init_value
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nat/modeling_nat.py b/venv/lib/python3.10/site-packages/transformers/models/nat/modeling_nat.py
new file mode 100644
index 0000000000000000000000000000000000000000..2434b65161a47c2a69be86a7cb9f045c96c26b5c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/nat/modeling_nat.py
@@ -0,0 +1,956 @@
+# coding=utf-8
+# Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Neighborhood Attention Transformer model."""
+
+
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BackboneOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ OptionalDependencyNotAvailable,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_natten_available,
+ logging,
+ replace_return_docstrings,
+ requires_backends,
+)
+from ...utils.backbone_utils import BackboneMixin
+from .configuration_nat import NatConfig
+
+
+if is_natten_available():
+ from natten.functional import natten2dav, natten2dqkrpb
+else:
+
+ def natten2dqkrpb(*args, **kwargs):
+ raise OptionalDependencyNotAvailable()
+
+ def natten2dav(*args, **kwargs):
+ raise OptionalDependencyNotAvailable()
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "NatConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "shi-labs/nat-mini-in1k-224"
+_EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "shi-labs/nat-mini-in1k-224"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
+
+
+from ..deprecated._archive_maps import NAT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# drop_path and NatDropPath are from the timm library.
+
+
+@dataclass
+class NatEncoderOutput(ModelOutput):
+ """
+ Nat encoder's outputs, with potential hidden states and attentions.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class NatModelOutput(ModelOutput):
+ """
+ Nat model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
+ Average pooling of the last layer hidden-state.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ pooler_output: Optional[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class NatImageClassifierOutput(ModelOutput):
+ """
+ Nat outputs for image classification.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
+ shape `(batch_size, hidden_size, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
+ include the spatial dimensions.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+class NatEmbeddings(nn.Module):
+ """
+ Construct the patch and position embeddings.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.patch_embeddings = NatPatchEmbeddings(config)
+
+ self.norm = nn.LayerNorm(config.embed_dim)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]:
+ embeddings = self.patch_embeddings(pixel_values)
+ embeddings = self.norm(embeddings)
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+class NatPatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ patch_size = config.patch_size
+ num_channels, hidden_size = config.num_channels, config.embed_dim
+ self.num_channels = num_channels
+
+ if patch_size == 4:
+ pass
+ else:
+ # TODO: Support arbitrary patch sizes.
+ raise ValueError("Nat only supports a patch size of 4 at the moment.")
+
+ self.projection = nn.Sequential(
+ nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
+ nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
+ )
+
+ def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
+ _, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
+ )
+ embeddings = self.projection(pixel_values)
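+ # the two stride-2 convolutions reduce the spatial resolution by a factor of 4; move the channels last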
+ embeddings = embeddings.permute(0, 2, 3, 1)
+
+ return embeddings
+
+
+class NatDownsampler(nn.Module):
+ """
+ Convolutional Downsampling Layer.
+
+ Args:
+ dim (`int`):
+ Number of input channels.
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
+ Normalization layer class.
+ """
+
+ def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
+ super().__init__()
+ self.dim = dim
+ self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
+ self.norm = norm_layer(2 * dim)
+
+ def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
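+ # nn.Conv2d expects channels-first input, so permute (batch, height, width, channels) to
+ # (batch, channels, height, width) for the reduction and back again afterwards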
+ input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
+ input_feature = self.norm(input_feature)
+ return input_feature
+
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
+
+# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Nat
+class NatDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class NeighborhoodAttention(nn.Module):
+ def __init__(self, config, dim, num_heads, kernel_size):
+ super().__init__()
+ if dim % num_heads != 0:
+ raise ValueError(
+ f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
+ )
+
+ self.num_attention_heads = num_heads
+ self.attention_head_size = int(dim / num_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.kernel_size = kernel_size
+
+ # rpb holds the learnable relative positional biases; the same concept is used in Swin.
+ self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1)))
+
+ self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
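+ # (batch_size, height, width, all_head_size) -> (batch_size, num_heads, height, width, head_size)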
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 3, 1, 2, 4)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ # Apply the scale factor before computing the attention weights. This is usually more efficient because
+ # the attention weights are typically a bigger tensor than the query.
+ # It gives identical results because scalar multiplication commutes with matrix multiplication.
+ query_layer = query_layer / math.sqrt(self.attention_head_size)
+
+ # Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases.
+ attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, 1)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, 1)
+ context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+class NeighborhoodAttentionOutput(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(dim, dim)
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class NeighborhoodAttentionModule(nn.Module):
+ def __init__(self, config, dim, num_heads, kernel_size):
+ super().__init__()
+ self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size)
+ self.output = NeighborhoodAttentionOutput(config, dim)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(hidden_states, output_attentions)
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class NatIntermediate(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class NatOutput(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class NatLayer(nn.Module):
+ def __init__(self, config, dim, num_heads, drop_path_rate=0.0):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.kernel_size = config.kernel_size
+ self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size)
+ self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
+ self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ self.intermediate = NatIntermediate(config, dim)
+ self.output = NatOutput(config, dim)
+ self.layer_scale_parameters = (
+ nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True)
+ if config.layer_scale_init_value > 0
+ else None
+ )
+
+ def maybe_pad(self, hidden_states, height, width):
+ window_size = self.kernel_size
+ pad_values = (0, 0, 0, 0, 0, 0)
+ if height < window_size or width < window_size:
+ pad_l = pad_t = 0
+ pad_r = max(0, window_size - width)
+ pad_b = max(0, window_size - height)
+ pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b)
+ hidden_states = nn.functional.pad(hidden_states, pad_values)
+ return hidden_states, pad_values
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ batch_size, height, width, channels = hidden_states.size()
+ shortcut = hidden_states
+
+ hidden_states = self.layernorm_before(hidden_states)
+ # pad hidden_states if they are smaller than kernel size
+ hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
+
+ _, height_pad, width_pad, _ = hidden_states.shape
+
+ attention_outputs = self.attention(hidden_states, output_attentions=output_attentions)
+
+ attention_output = attention_outputs[0]
+
+ was_padded = pad_values[3] > 0 or pad_values[5] > 0
+ if was_padded:
+ attention_output = attention_output[:, :height, :width, :].contiguous()
+
+ if self.layer_scale_parameters is not None:
+ attention_output = self.layer_scale_parameters[0] * attention_output
+
+ hidden_states = shortcut + self.drop_path(attention_output)
+
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.output(self.intermediate(layer_output))
+
+ if self.layer_scale_parameters is not None:
+ layer_output = self.layer_scale_parameters[1] * layer_output
+
+ layer_output = hidden_states + self.drop_path(layer_output)
+
+ layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
+ return layer_outputs
+
+
+class NatStage(nn.Module):
+ def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample):
+ super().__init__()
+ self.config = config
+ self.dim = dim
+ self.layers = nn.ModuleList(
+ [
+ NatLayer(
+ config=config,
+ dim=dim,
+ num_heads=num_heads,
+ drop_path_rate=drop_path_rate[i],
+ )
+ for i in range(depth)
+ ]
+ )
+
+ # patch merging layer
+ if downsample is not None:
+ self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm)
+ else:
+ self.downsample = None
+
+ self.pointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ _, height, width, _ = hidden_states.size()
+ for i, layer_module in enumerate(self.layers):
+ layer_outputs = layer_module(hidden_states, output_attentions)
+ hidden_states = layer_outputs[0]
+
+ hidden_states_before_downsampling = hidden_states
+ if self.downsample is not None:
+ hidden_states = self.downsample(hidden_states_before_downsampling)
+
+ stage_outputs = (hidden_states, hidden_states_before_downsampling)
+
+ if output_attentions:
+ stage_outputs += layer_outputs[1:]
+ return stage_outputs
+
+
+class NatEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.num_levels = len(config.depths)
+ self.config = config
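+ # stochastic depth decay rule: drop path rates increase linearly from 0 to `config.drop_path_rate`
+ # across the flattened list of layers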
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
+ self.levels = nn.ModuleList(
+ [
+ NatStage(
+ config=config,
+ dim=int(config.embed_dim * 2**i_layer),
+ depth=config.depths[i_layer],
+ num_heads=config.num_heads[i_layer],
+ drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
+ downsample=NatDownsampler if (i_layer < self.num_levels - 1) else None,
+ )
+ for i_layer in range(self.num_levels)
+ ]
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ output_hidden_states_before_downsampling: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, NatEncoderOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_reshaped_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if output_hidden_states:
+ # rearrange b h w c -> b c h w
+ reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ for i, layer_module in enumerate(self.levels):
+ layer_outputs = layer_module(hidden_states, output_attentions)
+
+ hidden_states = layer_outputs[0]
+ hidden_states_before_downsampling = layer_outputs[1]
+
+ if output_hidden_states and output_hidden_states_before_downsampling:
+ # rearrange b h w c -> b c h w
+ reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states_before_downsampling,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
+ # rearrange b h w c -> b c h w
+ reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ if output_attentions:
+ all_self_attentions += layer_outputs[2:]
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+
+ return NatEncoderOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ reshaped_hidden_states=all_reshaped_hidden_states,
+ )
+
+
+class NatPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = NatConfig
+ base_model_prefix = "nat"
+ main_input_name = "pixel_values"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+NAT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`NatConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+NAT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
+ for details.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Nat Model transformer outputting raw hidden-states without any specific head on top.",
+ NAT_START_DOCSTRING,
+)
+class NatModel(NatPreTrainedModel):
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+
+ requires_backends(self, ["natten"])
+
+ self.config = config
+ self.num_levels = len(config.depths)
+ self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1))
+
+ self.embeddings = NatEmbeddings(config)
+ self.encoder = NatEncoder(config)
+
+ self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
+ self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class `PreTrainedModel` for details.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=NatModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, NatModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ embedding_output = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+
+ pooled_output = None
+ if self.pooler is not None:
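+ # average over all spatial positions: (batch_size, height, width, hidden_size) -> (batch_size, hidden_size)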
+ pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2))
+ pooled_output = torch.flatten(pooled_output, 1)
+
+ if not return_dict:
+ output = (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return output
+
+ return NatModelOutput(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """
+ Nat Model transformer with an image classification head on top (a linear layer on top of the pooled output),
+ e.g. for ImageNet.
+ """,
+ NAT_START_DOCSTRING,
+)
+class NatForImageClassification(NatPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ requires_backends(self, ["natten"])
+
+ self.num_labels = config.num_labels
+ self.nat = NatModel(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(self.nat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=NatImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, NatImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
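+
+ For example, a minimal sketch with a randomly initialized model and random inputs (this requires the
+ `natten` package to be installed):
+
+ ```python
+ >>> import torch
+ >>> from transformers import NatConfig, NatForImageClassification
+
+ >>> model = NatForImageClassification(NatConfig(num_labels=10))
+ >>> pixel_values = torch.rand(1, 3, 224, 224)
+ >>> labels = torch.tensor([3])
+
+ >>> outputs = model(pixel_values, labels=labels)
+ >>> loss, logits = outputs.loss, outputs.logits
+ ```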
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.nat(
+ pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return NatImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ reshaped_hidden_states=outputs.reshaped_hidden_states,
+ )
+
+
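+# Editor's note: a compact, standalone restatement of the `problem_type` dispatch used in
+# `NatForImageClassification.forward` above. The helper name `_example_classification_loss` is illustrative only and
+# not part of the Transformers API; it relies on the loss classes already imported in this file.
+def _example_classification_loss(logits: torch.Tensor, labels: torch.Tensor, num_labels: int) -> torch.Tensor:
+    if num_labels == 1:
+        # a single continuous target -> regression (mean squared error)
+        return MSELoss()(logits.squeeze(), labels.squeeze().float())
+    if labels.dtype in (torch.long, torch.int):
+        # integer class indices -> single-label classification (cross entropy)
+        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
+    # float multi-hot targets -> multi-label classification (binary cross entropy with logits)
+    return BCEWithLogitsLoss()(logits, labels.float())
+
+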
+@add_start_docstrings(
+ "NAT backbone, to be used with frameworks like DETR and MaskFormer.",
+ NAT_START_DOCSTRING,
+)
+class NatBackbone(NatPreTrainedModel, BackboneMixin):
+ def __init__(self, config):
+ super().__init__(config)
+ super()._init_backbone(config)
+
+ requires_backends(self, ["natten"])
+
+ self.embeddings = NatEmbeddings(config)
+ self.encoder = NatEncoder(config)
+ self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))]
+
+ # Add layer norms to hidden states of out_features
+ hidden_states_norms = {}
+ for stage, num_channels in zip(self.out_features, self.channels):
+ hidden_states_norms[stage] = nn.LayerNorm(num_channels)
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
+ >>> model = AutoBackbone.from_pretrained(
+ ... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
+ ... )
+
+ >>> inputs = processor(image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+
+ >>> feature_maps = outputs.feature_maps
+ >>> list(feature_maps[-1].shape)
+ [1, 512, 7, 7]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ embedding_output = self.embeddings(pixel_values)
+
+ outputs = self.encoder(
+ embedding_output,
+ output_attentions=output_attentions,
+ output_hidden_states=True,
+ output_hidden_states_before_downsampling=True,
+ return_dict=True,
+ )
+
+ hidden_states = outputs.reshaped_hidden_states
+
+ feature_maps = ()
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
+ if stage in self.out_features:
+ # TODO can we simplify this?
+ batch_size, num_channels, height, width = hidden_state.shape
+ hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
+ hidden_state = hidden_state.view(batch_size, height * width, num_channels)
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
+ hidden_state = hidden_state.view(batch_size, height, width, num_channels)
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
+ feature_maps += (hidden_state,)
+
+ if not return_dict:
+ output = (feature_maps,)
+ if output_hidden_states:
+ output += (outputs.hidden_states,)
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=outputs.attentions,
+ )
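+
+
+# Editor's note: a standalone sketch of the reshaping done in `NatBackbone.forward` above, where a channels-last
+# `nn.LayerNorm` is applied to an NCHW feature map. The helper name is illustrative only.
+def _example_channels_last_layernorm(hidden_state: torch.Tensor, norm: nn.LayerNorm) -> torch.Tensor:
+    batch_size, num_channels, height, width = hidden_state.shape
+    hidden_state = hidden_state.permute(0, 2, 3, 1).reshape(batch_size, height * width, num_channels)
+    hidden_state = norm(hidden_state)
+    return hidden_state.reshape(batch_size, height, width, num_channels).permute(0, 3, 1, 2).contiguous()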
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..313767c02dda89ccb6c3691c56843bb3559be7ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__init__.py
@@ -0,0 +1,77 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+ "configuration_superpoint": [
+ "SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "SuperPointConfig",
+ ]
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_superpoint"] = ["SuperPointImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_superpoint"] = [
+ "SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "SuperPointForKeypointDetection",
+ "SuperPointPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_superpoint import (
+ SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ SuperPointConfig,
+ )
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_superpoint import SuperPointImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_superpoint import (
+ SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ SuperPointForKeypointDetection,
+ SuperPointPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6815c7b7ebf1cca658d6ae0c8cd1deb9d768448e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/configuration_superpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/configuration_superpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..997bc393c40bde55bbfe5a68f6e09a65dc910c30
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/configuration_superpoint.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/convert_superpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/convert_superpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3340799940bb8f94b87227cf9fc8be525cfebdcd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/convert_superpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/image_processing_superpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/image_processing_superpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49ae045a1c31be1d46548a09e572abadbfcd46d4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/image_processing_superpoint.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/modeling_superpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/modeling_superpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8991724c8fd878a1a4f6158511f1b228b8b71eaf
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/modeling_superpoint.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/configuration_superpoint.py b/venv/lib/python3.10/site-packages/transformers/models/superpoint/configuration_superpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..5970a6e1b4134d08d1fa17f69bbf50316d341665
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/superpoint/configuration_superpoint.py
@@ -0,0 +1,91 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "magic-leap-community/superpoint": "https://huggingface.co/magic-leap-community/superpoint/blob/main/config.json"
+}
+
+
+class SuperPointConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`SuperPointForKeypointDetection`]. It is used to instantiate a
+ SuperPoint model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the SuperPoint
+ [magic-leap-community/superpoint](https://huggingface.co/magic-leap-community/superpoint) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ encoder_hidden_sizes (`List`, *optional*, defaults to `[64, 64, 128, 128]`):
+ The number of channels in each convolutional layer in the encoder.
+ decoder_hidden_size (`int`, *optional*, defaults to 256): The hidden size of the decoder.
+ keypoint_decoder_dim (`int`, *optional*, defaults to 65): The output dimension of the keypoint decoder.
+ descriptor_decoder_dim (`int`, *optional*, defaults to 256): The output dimension of the descriptor decoder.
+ keypoint_threshold (`float`, *optional*, defaults to 0.005):
+ The threshold to use for extracting keypoints.
+ max_keypoints (`int`, *optional*, defaults to -1):
+ The maximum number of keypoints to extract. If `-1`, will extract all keypoints.
+ nms_radius (`int`, *optional*, defaults to 4):
+ The radius for non-maximum suppression.
+ border_removal_distance (`int`, *optional*, defaults to 4):
+ The distance from the border to remove keypoints.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+
+ Example:
+ ```python
+ >>> from transformers import SuperPointConfig, SuperPointForKeypointDetection
+
+ >>> # Initializing a SuperPoint superpoint style configuration
+ >>> configuration = SuperPointConfig()
+ >>> # Initializing a model from the superpoint style configuration
+ >>> model = SuperPointForKeypointDetection(configuration)
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "superpoint"
+
+ def __init__(
+ self,
+ encoder_hidden_sizes: List[int] = [64, 64, 128, 128],
+ decoder_hidden_size: int = 256,
+ keypoint_decoder_dim: int = 65,
+ descriptor_decoder_dim: int = 256,
+ keypoint_threshold: float = 0.005,
+ max_keypoints: int = -1,
+ nms_radius: int = 4,
+ border_removal_distance: int = 4,
+ initializer_range=0.02,
+ **kwargs,
+ ):
+ self.encoder_hidden_sizes = encoder_hidden_sizes
+ self.decoder_hidden_size = decoder_hidden_size
+ self.keypoint_decoder_dim = keypoint_decoder_dim
+ self.descriptor_decoder_dim = descriptor_decoder_dim
+ self.keypoint_threshold = keypoint_threshold
+ self.max_keypoints = max_keypoints
+ self.nms_radius = nms_radius
+ self.border_removal_distance = border_removal_distance
+ self.initializer_range = initializer_range
+
+ super().__init__(**kwargs)
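+
+
+# Editor's note: a small usage sketch of the configuration defined above; the override values are illustrative,
+# not recommended settings, and the helper name is not part of the Transformers API.
+def _example_custom_config() -> SuperPointConfig:
+    # Keep at most 1024 keypoints and widen the non-maximum suppression radius relative to the defaults.
+    return SuperPointConfig(max_keypoints=1024, nms_radius=8)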
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/convert_superpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/superpoint/convert_superpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..18755bf4fe01b2b6de2a0a2e0970df7f06909c5a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/superpoint/convert_superpoint_to_pytorch.py
@@ -0,0 +1,175 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import os
+
+import requests
+import torch
+from PIL import Image
+
+from transformers import SuperPointConfig, SuperPointForKeypointDetection, SuperPointImageProcessor
+
+
+def get_superpoint_config():
+ config = SuperPointConfig(
+ encoder_hidden_sizes=[64, 64, 128, 128],
+ decoder_hidden_size=256,
+ keypoint_decoder_dim=65,
+ descriptor_decoder_dim=256,
+ keypoint_threshold=0.005,
+ max_keypoints=-1,
+ nms_radius=4,
+ border_removal_distance=4,
+ initializer_range=0.02,
+ )
+
+ return config
+
+
+def create_rename_keys(config, state_dict):
+ rename_keys = []
+
+ # Encoder weights
+ rename_keys.append(("conv1a.weight", "encoder.conv_blocks.0.conv_a.weight"))
+ rename_keys.append(("conv1b.weight", "encoder.conv_blocks.0.conv_b.weight"))
+ rename_keys.append(("conv2a.weight", "encoder.conv_blocks.1.conv_a.weight"))
+ rename_keys.append(("conv2b.weight", "encoder.conv_blocks.1.conv_b.weight"))
+ rename_keys.append(("conv3a.weight", "encoder.conv_blocks.2.conv_a.weight"))
+ rename_keys.append(("conv3b.weight", "encoder.conv_blocks.2.conv_b.weight"))
+ rename_keys.append(("conv4a.weight", "encoder.conv_blocks.3.conv_a.weight"))
+ rename_keys.append(("conv4b.weight", "encoder.conv_blocks.3.conv_b.weight"))
+ rename_keys.append(("conv1a.bias", "encoder.conv_blocks.0.conv_a.bias"))
+ rename_keys.append(("conv1b.bias", "encoder.conv_blocks.0.conv_b.bias"))
+ rename_keys.append(("conv2a.bias", "encoder.conv_blocks.1.conv_a.bias"))
+ rename_keys.append(("conv2b.bias", "encoder.conv_blocks.1.conv_b.bias"))
+ rename_keys.append(("conv3a.bias", "encoder.conv_blocks.2.conv_a.bias"))
+ rename_keys.append(("conv3b.bias", "encoder.conv_blocks.2.conv_b.bias"))
+ rename_keys.append(("conv4a.bias", "encoder.conv_blocks.3.conv_a.bias"))
+ rename_keys.append(("conv4b.bias", "encoder.conv_blocks.3.conv_b.bias"))
+
+ # Keypoint Decoder weights
+ rename_keys.append(("convPa.weight", "keypoint_decoder.conv_score_a.weight"))
+ rename_keys.append(("convPb.weight", "keypoint_decoder.conv_score_b.weight"))
+ rename_keys.append(("convPa.bias", "keypoint_decoder.conv_score_a.bias"))
+ rename_keys.append(("convPb.bias", "keypoint_decoder.conv_score_b.bias"))
+
+ # Descriptor Decoder weights
+ rename_keys.append(("convDa.weight", "descriptor_decoder.conv_descriptor_a.weight"))
+ rename_keys.append(("convDb.weight", "descriptor_decoder.conv_descriptor_b.weight"))
+ rename_keys.append(("convDa.bias", "descriptor_decoder.conv_descriptor_a.bias"))
+ rename_keys.append(("convDb.bias", "descriptor_decoder.conv_descriptor_b.bias"))
+
+ return rename_keys
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+def prepare_imgs():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im1 = Image.open(requests.get(url, stream=True).raw)
+ url = "http://images.cocodataset.org/test-stuff2017/000000004016.jpg"
+ im2 = Image.open(requests.get(url, stream=True).raw)
+ return [im1, im2]
+
+
+@torch.no_grad()
+def convert_superpoint_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub, test_mode=False):
+ """
+ Copy/paste/tweak model's weights to our SuperPoint structure.
+ """
+
+ print("Downloading original model from checkpoint...")
+ config = get_superpoint_config()
+
+ # load original state_dict from URL
+ original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)
+
+ print("Converting model parameters...")
+ # rename keys
+ rename_keys = create_rename_keys(config, original_state_dict)
+ new_state_dict = original_state_dict.copy()
+ for src, dest in rename_keys:
+ rename_key(new_state_dict, src, dest)
+
+ # Load HuggingFace model
+ model = SuperPointForKeypointDetection(config)
+ model.load_state_dict(new_state_dict)
+ model.eval()
+ print("Successfully loaded weights in the model")
+
+ # Check model outputs
+ preprocessor = SuperPointImageProcessor()
+ inputs = preprocessor(images=prepare_imgs(), return_tensors="pt")
+ outputs = model(**inputs)
+
+ # If test_mode is True, we check that the model outputs match the original results
+ if test_mode:
+ torch.count_nonzero(outputs.mask[0])
+ expected_keypoints_shape = (2, 830, 2)
+ expected_scores_shape = (2, 830)
+ expected_descriptors_shape = (2, 830, 256)
+
+ expected_keypoints_values = torch.tensor([[480.0, 9.0], [494.0, 9.0], [489.0, 16.0]])
+ expected_scores_values = torch.tensor([0.0064, 0.0140, 0.0595, 0.0728, 0.5170, 0.0175, 0.1523, 0.2055, 0.0336])
+ expected_descriptors_value = torch.tensor(-0.1096)
+ assert outputs.keypoints.shape == expected_keypoints_shape
+ assert outputs.scores.shape == expected_scores_shape
+ assert outputs.descriptors.shape == expected_descriptors_shape
+
+ assert torch.allclose(outputs.keypoints[0, :3], expected_keypoints_values, atol=1e-3)
+ assert torch.allclose(outputs.scores[0, :9], expected_scores_values, atol=1e-3)
+ assert torch.allclose(outputs.descriptors[0, 0, 0], expected_descriptors_value, atol=1e-3)
+ print("Model outputs match the original results!")
+
+ if save_model:
+ print("Saving model to local...")
+ # Create folder to save model
+ if not os.path.isdir(pytorch_dump_folder_path):
+ os.mkdir(pytorch_dump_folder_path)
+
+ model.save_pretrained(pytorch_dump_folder_path)
+ preprocessor.save_pretrained(pytorch_dump_folder_path)
+
+ model_name = "superpoint"
+ if push_to_hub:
+ print(f"Pushing {model_name} to the hub...")
+ model.push_to_hub(model_name)
+ preprocessor.push_to_hub(model_name)
+
+
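+# Editor's note: a toy illustration of the key-renaming step performed in `convert_superpoint_checkpoint` below,
+# applied to a fabricated two-entry state dict rather than the real checkpoint.
+def _example_rename():
+    toy_state_dict = {"conv1a.weight": torch.zeros(1), "conv1a.bias": torch.zeros(1)}
+    for src, dest in [
+        ("conv1a.weight", "encoder.conv_blocks.0.conv_a.weight"),
+        ("conv1a.bias", "encoder.conv_blocks.0.conv_a.bias"),
+    ]:
+        rename_key(toy_state_dict, src, dest)
+    # -> ["encoder.conv_blocks.0.conv_a.bias", "encoder.conv_blocks.0.conv_a.weight"]
+    return sorted(toy_state_dict)
+
+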
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://github.com/magicleap/SuperPointPretrainedNetwork/raw/master/superpoint_v1.pth",
+ type=str,
+ help="URL of the original SuperPoint checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default="model",
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument("--save_model", action="store_true", help="Save model to local")
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub")
+
+ args = parser.parse_args()
+ convert_superpoint_checkpoint(
+ args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/image_processing_superpoint.py b/venv/lib/python3.10/site-packages/transformers/models/superpoint/image_processing_superpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbbb717570cb704edcccecb50bb863c5038a4dd3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/superpoint/image_processing_superpoint.py
@@ -0,0 +1,272 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for SuperPoint."""
+
+from typing import Dict, Optional, Union
+
+import numpy as np
+
+from ... import is_vision_available
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import resize, to_channel_dimension_format
+from ...image_utils import (
+ ChannelDimension,
+ ImageInput,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+)
+from ...utils import TensorType, logging, requires_backends
+
+
+if is_vision_available():
+ import PIL
+
+logger = logging.get_logger(__name__)
+
+
+def is_grayscale(
+ image: ImageInput,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+):
+ if input_data_format == ChannelDimension.FIRST:
+ return np.all(image[0, ...] == image[1, ...]) and np.all(image[1, ...] == image[2, ...])
+ elif input_data_format == ChannelDimension.LAST:
+ return np.all(image[..., 0] == image[..., 1]) and np.all(image[..., 1] == image[..., 2])
+
+
+def convert_to_grayscale(
+ image: ImageInput,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+) -> ImageInput:
+ """
+    Converts an image to grayscale format using the NTSC formula. Only numpy arrays and PIL images are currently
+    supported. TODO: support torch and tensorflow grayscale conversion.
+
+    This function is supposed to return a 1-channel image, but it returns a 3-channel image with the same value in
+    each channel, because of an issue discussed in
+    https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
+
+ Args:
+ image (Image):
+ The image to convert.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image.
+ """
+ requires_backends(convert_to_grayscale, ["vision"])
+
+ if isinstance(image, np.ndarray):
+ if input_data_format == ChannelDimension.FIRST:
+ gray_image = image[0, ...] * 0.2989 + image[1, ...] * 0.5870 + image[2, ...] * 0.1140
+ gray_image = np.stack([gray_image] * 3, axis=0)
+ elif input_data_format == ChannelDimension.LAST:
+ gray_image = image[..., 0] * 0.2989 + image[..., 1] * 0.5870 + image[..., 2] * 0.1140
+ gray_image = np.stack([gray_image] * 3, axis=-1)
+ return gray_image
+
+ if not isinstance(image, PIL.Image.Image):
+ return image
+
+ image = image.convert("L")
+ return image
+
+
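+# Editor's note: a compact numpy-only restatement of the channels-last branch of `convert_to_grayscale` above;
+# like that function, it keeps three identical channels instead of a single one. The helper name is illustrative only.
+def _example_ntsc_grayscale(image: np.ndarray) -> np.ndarray:
+    # image: (height, width, 3) RGB array
+    gray = image[..., 0] * 0.2989 + image[..., 1] * 0.5870 + image[..., 2] * 0.1140
+    return np.stack([gray] * 3, axis=-1)
+
+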
+class SuperPointImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a SuperPoint image processor.
+
+ Args:
+        do_resize (`bool`, *optional*, defaults to `True`):
+            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
+            overridden by `do_resize` in the `preprocess` method.
+        size (`Dict[str, int]`, *optional*, defaults to `{"height": 480, "width": 640}`):
+            Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
+            `True`. Can be overridden by `size` in the `preprocess` method.
+        do_rescale (`bool`, *optional*, defaults to `True`):
+            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+            the `preprocess` method.
+        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+            method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ do_rescale: bool = True,
+ rescale_factor: float = 1 / 255,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 480, "width": 640}
+ size = get_size_dict(size, default_to_square=False)
+
+ self.do_resize = do_resize
+ self.size = size
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ):
+ """
+ Resize an image.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the output image. If not provided, it will be inferred from the input
+ image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ size = get_size_dict(size, default_to_square=False)
+
+ return resize(
+ image,
+ size=(size["height"], size["width"]),
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def preprocess(
+ self,
+ images,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+                Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image after
+                `resize` has been applied. Only has an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values between [0 - 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ if do_resize and size is None:
+ raise ValueError("Size must be specified if do_resize is True.")
+
+ if do_rescale and rescale_factor is None:
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [self.resize(image=image, size=size, input_data_format=input_data_format) for image in images]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ # Checking if image is RGB or grayscale
+ for i in range(len(images)):
+ if not is_grayscale(images[i], input_data_format):
+ images[i] = convert_to_grayscale(images[i], input_data_format=input_data_format)
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+
+ return BatchFeature(data=data, tensor_type=return_tensors)
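+
+
+# Editor's note: a minimal usage sketch of the processor defined above; the random array stands in for a real
+# photograph, and the helper name is illustrative only.
+def _example_preprocess() -> BatchFeature:
+    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
+    processor = SuperPointImageProcessor()
+    # Resizes to 480x640, rescales pixel values to [0, 1] and collapses RGB to grayscale
+    # (kept as three identical channels).
+    return processor(images=image, return_tensors="np")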
diff --git a/venv/lib/python3.10/site-packages/transformers/models/superpoint/modeling_superpoint.py b/venv/lib/python3.10/site-packages/transformers/models/superpoint/modeling_superpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e3fdbbf10cfb14921704c3831afe6494ceec504
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/superpoint/modeling_superpoint.py
@@ -0,0 +1,500 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch SuperPoint model."""
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from transformers import PreTrainedModel
+from transformers.modeling_outputs import (
+ BaseModelOutputWithNoAttention,
+)
+from transformers.models.superpoint.configuration_superpoint import SuperPointConfig
+
+from ...pytorch_utils import is_torch_greater_or_equal_than_1_13
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "SuperPointConfig"
+
+_CHECKPOINT_FOR_DOC = "magic-leap-community/superpoint"
+
+SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST = ["magic-leap-community/superpoint"]
+
+
+def remove_keypoints_from_borders(
+ keypoints: torch.Tensor, scores: torch.Tensor, border: int, height: int, width: int
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Removes keypoints (and their associated scores) that are too close to the border"""
+ mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border))
+ mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border))
+ mask = mask_h & mask_w
+ return keypoints[mask], scores[mask]
+
+
+def top_k_keypoints(keypoints: torch.Tensor, scores: torch.Tensor, k: int) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Keeps the k keypoints with highest score"""
+ if k >= len(keypoints):
+ return keypoints, scores
+ scores, indices = torch.topk(scores, k, dim=0)
+ return keypoints[indices], scores
+
+
+def simple_nms(scores: torch.Tensor, nms_radius: int) -> torch.Tensor:
+ """Applies non-maximum suppression on scores"""
+ if nms_radius < 0:
+        raise ValueError("Expected a non-negative value for nms_radius")
+
+ def max_pool(x):
+ return nn.functional.max_pool2d(x, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius)
+
+ zeros = torch.zeros_like(scores)
+ max_mask = scores == max_pool(scores)
+ for _ in range(2):
+ supp_mask = max_pool(max_mask.float()) > 0
+ supp_scores = torch.where(supp_mask, zeros, scores)
+ new_max_mask = supp_scores == max_pool(supp_scores)
+ max_mask = max_mask | (new_max_mask & (~supp_mask))
+ return torch.where(max_mask, scores, zeros)
+
+
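+# Editor's note: a toy demonstration of `simple_nms` above: a dominant peak survives while a weaker response inside
+# its suppression window is zeroed out. The helper name is illustrative only.
+def _example_simple_nms() -> torch.Tensor:
+    scores = torch.zeros(1, 1, 9, 9)
+    scores[0, 0, 4, 4] = 1.0  # strong keypoint
+    scores[0, 0, 4, 5] = 0.5  # weaker neighbour within the radius, suppressed
+    return simple_nms(scores, nms_radius=2)  # only the response at (4, 4) remains non-zero
+
+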
+@dataclass
+class SuperPointKeypointDescriptionOutput(ModelOutput):
+ """
+ Base class for outputs of image point description models. Due to the nature of keypoint detection, the number of
+ keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the batch of images,
+ the maximum number of keypoints is set as the dimension of the keypoints, scores and descriptors tensors. The mask
+ tensor is used to indicate which values in the keypoints, scores and descriptors tensors are keypoint information
+ and which are padding.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
+ Loss computed during training.
+ keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
+ Relative (x, y) coordinates of predicted keypoints in a given image.
+ scores (`torch.FloatTensor` of shape `(batch_size, num_keypoints)`):
+ Scores of predicted keypoints.
+ descriptors (`torch.FloatTensor` of shape `(batch_size, num_keypoints, descriptor_size)`):
+ Descriptors of predicted keypoints.
+ mask (`torch.BoolTensor` of shape `(batch_size, num_keypoints)`):
+ Mask indicating which values in keypoints, scores and descriptors are keypoint information.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
+ when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
+ (also called feature maps) of the model at the output of each stage.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ keypoints: Optional[torch.IntTensor] = None
+ scores: Optional[torch.FloatTensor] = None
+ descriptors: Optional[torch.FloatTensor] = None
+ mask: Optional[torch.BoolTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class SuperPointConvBlock(nn.Module):
+ def __init__(
+ self, config: SuperPointConfig, in_channels: int, out_channels: int, add_pooling: bool = False
+ ) -> None:
+ super().__init__()
+ self.conv_a = nn.Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ )
+ self.conv_b = nn.Conv2d(
+ out_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ )
+ self.relu = nn.ReLU(inplace=True)
+ self.pool = nn.MaxPool2d(kernel_size=2, stride=2) if add_pooling else None
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.relu(self.conv_a(hidden_states))
+ hidden_states = self.relu(self.conv_b(hidden_states))
+ if self.pool is not None:
+ hidden_states = self.pool(hidden_states)
+ return hidden_states
+
+
+class SuperPointEncoder(nn.Module):
+ """
+    SuperPoint encoder module. It is made of 4 convolutional blocks, each with two convolutions and ReLU activations,
+    with max pooling between blocks, progressively reducing the spatial resolution of the image.
+ """
+
+ def __init__(self, config: SuperPointConfig) -> None:
+ super().__init__()
+ # SuperPoint uses 1 channel images
+ self.input_dim = 1
+
+ conv_blocks = []
+ conv_blocks.append(
+ SuperPointConvBlock(config, self.input_dim, config.encoder_hidden_sizes[0], add_pooling=True)
+ )
+ for i in range(1, len(config.encoder_hidden_sizes) - 1):
+ conv_blocks.append(
+ SuperPointConvBlock(
+ config, config.encoder_hidden_sizes[i - 1], config.encoder_hidden_sizes[i], add_pooling=True
+ )
+ )
+ conv_blocks.append(
+ SuperPointConvBlock(
+ config, config.encoder_hidden_sizes[-2], config.encoder_hidden_sizes[-1], add_pooling=False
+ )
+ )
+ self.conv_blocks = nn.ModuleList(conv_blocks)
+
+ def forward(
+ self,
+ input,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
+ all_hidden_states = () if output_hidden_states else None
+
+ for conv_block in self.conv_blocks:
+ input = conv_block(input)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (input,)
+ output = input
+ if not return_dict:
+ return tuple(v for v in [output, all_hidden_states] if v is not None)
+
+ return BaseModelOutputWithNoAttention(
+ last_hidden_state=output,
+ hidden_states=all_hidden_states,
+ )
+
+
+class SuperPointInterestPointDecoder(nn.Module):
+ """
+    The SuperPointInterestPointDecoder uses the output of the SuperPointEncoder to compute keypoints and their scores.
+ The scores are first computed by a convolutional layer, then a softmax is applied to get a probability distribution
+ over the 65 possible keypoint classes. The keypoints are then extracted from the scores by thresholding and
+ non-maximum suppression. Post-processing is then applied to remove keypoints too close to the image borders as well
+ as to keep only the k keypoints with highest score.
+ """
+
+ def __init__(self, config: SuperPointConfig) -> None:
+ super().__init__()
+ self.keypoint_threshold = config.keypoint_threshold
+ self.max_keypoints = config.max_keypoints
+ self.nms_radius = config.nms_radius
+ self.border_removal_distance = config.border_removal_distance
+
+ self.relu = nn.ReLU(inplace=True)
+ self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
+ self.conv_score_a = nn.Conv2d(
+ config.encoder_hidden_sizes[-1],
+ config.decoder_hidden_size,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ )
+ self.conv_score_b = nn.Conv2d(
+ config.decoder_hidden_size, config.keypoint_decoder_dim, kernel_size=1, stride=1, padding=0
+ )
+
+ def forward(self, encoded: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ scores = self._get_pixel_scores(encoded)
+ keypoints, scores = self._extract_keypoints(scores)
+
+ return keypoints, scores
+
+ def _get_pixel_scores(self, encoded: torch.Tensor) -> torch.Tensor:
+ """Based on the encoder output, compute the scores for each pixel of the image"""
+ scores = self.relu(self.conv_score_a(encoded))
+ scores = self.conv_score_b(scores)
+ scores = nn.functional.softmax(scores, 1)[:, :-1]
+ batch_size, _, height, width = scores.shape
+ scores = scores.permute(0, 2, 3, 1).reshape(batch_size, height, width, 8, 8)
+ scores = scores.permute(0, 1, 3, 2, 4).reshape(batch_size, height * 8, width * 8)
+ scores = simple_nms(scores, self.nms_radius)
+ return scores
+
+ def _extract_keypoints(self, scores: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Based on their scores, extract the pixels that represent the keypoints that will be used for descriptors computation"""
+ _, height, width = scores.shape
+
+ # Threshold keypoints by score value
+ keypoints = torch.nonzero(scores[0] > self.keypoint_threshold)
+ scores = scores[0][tuple(keypoints.t())]
+
+ # Discard keypoints near the image borders
+ keypoints, scores = remove_keypoints_from_borders(
+ keypoints, scores, self.border_removal_distance, height * 8, width * 8
+ )
+
+ # Keep the k keypoints with highest score
+ if self.max_keypoints >= 0:
+ keypoints, scores = top_k_keypoints(keypoints, scores, self.max_keypoints)
+
+ # Convert (y, x) to (x, y)
+ keypoints = torch.flip(keypoints, [1]).float()
+
+ return keypoints, scores
+
+
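+# Editor's note: a standalone restatement of the score reshaping inside `_get_pixel_scores` below: the 65 channels
+# are softmaxed, the "dustbin" channel is dropped, and the remaining 64 channels are unfolded into 8x8 pixel cells to
+# recover a full-resolution score map. The helper name is illustrative only.
+def _example_unfold_scores(cell_scores: torch.Tensor) -> torch.Tensor:
+    # cell_scores: (batch_size, 65, height / 8, width / 8)
+    scores = nn.functional.softmax(cell_scores, 1)[:, :-1]
+    batch_size, _, height, width = scores.shape
+    scores = scores.permute(0, 2, 3, 1).reshape(batch_size, height, width, 8, 8)
+    return scores.permute(0, 1, 3, 2, 4).reshape(batch_size, height * 8, width * 8)
+
+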
+class SuperPointDescriptorDecoder(nn.Module):
+ """
+ The SuperPointDescriptorDecoder uses the outputs of both the SuperPointEncoder and the
+ SuperPointInterestPointDecoder to compute the descriptors at the keypoints locations.
+
+ The descriptors are first computed by a convolutional layer, then normalized to have a norm of 1. The descriptors
+ are then interpolated at the keypoints locations.
+ """
+
+ def __init__(self, config: SuperPointConfig) -> None:
+ super().__init__()
+
+ self.relu = nn.ReLU(inplace=True)
+ self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
+ self.conv_descriptor_a = nn.Conv2d(
+ config.encoder_hidden_sizes[-1],
+ config.decoder_hidden_size,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ )
+ self.conv_descriptor_b = nn.Conv2d(
+ config.decoder_hidden_size,
+ config.descriptor_decoder_dim,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ )
+
+ def forward(self, encoded: torch.Tensor, keypoints: torch.Tensor) -> torch.Tensor:
+ """Based on the encoder output and the keypoints, compute the descriptors for each keypoint"""
+ descriptors = self.conv_descriptor_b(self.relu(self.conv_descriptor_a(encoded)))
+ descriptors = nn.functional.normalize(descriptors, p=2, dim=1)
+
+ descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0]
+
+ # [descriptor_dim, num_keypoints] -> [num_keypoints, descriptor_dim]
+ descriptors = torch.transpose(descriptors, 0, 1)
+
+ return descriptors
+
+ @staticmethod
+ def _sample_descriptors(keypoints, descriptors, scale: int = 8) -> torch.Tensor:
+ """Interpolate descriptors at keypoint locations"""
+ batch_size, num_channels, height, width = descriptors.shape
+ keypoints = keypoints - scale / 2 + 0.5
+ divisor = torch.tensor([[(width * scale - scale / 2 - 0.5), (height * scale - scale / 2 - 0.5)]])
+ divisor = divisor.to(keypoints)
+ keypoints /= divisor
+ keypoints = keypoints * 2 - 1 # normalize to (-1, 1)
+ kwargs = {"align_corners": True} if is_torch_greater_or_equal_than_1_13 else {}
+        # [batch_size, num_keypoints, 2] -> [batch_size, 1, num_keypoints, 2]
+ keypoints = keypoints.view(batch_size, 1, -1, 2)
+ descriptors = nn.functional.grid_sample(descriptors, keypoints, mode="bilinear", **kwargs)
+        # [batch_size, descriptor_dim, 1, num_keypoints] -> [batch_size, descriptor_dim, num_keypoints]
+ descriptors = descriptors.reshape(batch_size, num_channels, -1)
+ descriptors = nn.functional.normalize(descriptors, p=2, dim=1)
+ return descriptors
+
+
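+# Editor's note: a small sketch of the coordinate normalisation performed in `_sample_descriptors` above, mapping
+# pixel keypoint locations into the (-1, 1) range expected by `grid_sample`. `height` and `width` are the spatial
+# dimensions of the coarse descriptor map (the image size divided by `scale`); the helper name is illustrative only.
+def _example_normalize_keypoints(keypoints: torch.Tensor, height: int, width: int, scale: int = 8) -> torch.Tensor:
+    # keypoints: (num_keypoints, 2) in (x, y) pixel coordinates of the full-resolution image
+    keypoints = keypoints - scale / 2 + 0.5
+    divisor = torch.tensor([width * scale - scale / 2 - 0.5, height * scale - scale / 2 - 0.5])
+    return keypoints / divisor * 2 - 1
+
+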
+class SuperPointPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = SuperPointConfig
+ base_model_prefix = "superpoint"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = False
+
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ def extract_one_channel_pixel_values(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor:
+ """
+        Assuming pixel_values has shape (batch_size, 3, height, width), and that all channel values are the same,
+        extract the first channel value to get a tensor of shape (batch_size, 1, height, width) for SuperPoint. This is
+        a workaround for the issue discussed in
+ https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
+
+ Args:
+ pixel_values: torch.FloatTensor of shape (batch_size, 3, height, width)
+
+ Returns:
+ pixel_values: torch.FloatTensor of shape (batch_size, 1, height, width)
+
+ """
+ return pixel_values[:, 0, :, :][:, None, :, :]
+
+
+SUPERPOINT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`SuperPointConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+SUPERPOINT_INPUTS_DOCSTRING = r"""
+Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`SuperPointImageProcessor`]. See
+ [`SuperPointImageProcessor.__call__`] for details.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more
+ detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+
+@add_start_docstrings(
+ "SuperPoint model outputting keypoints and descriptors.",
+ SUPERPOINT_START_DOCSTRING,
+)
+class SuperPointForKeypointDetection(SuperPointPreTrainedModel):
+ """
+ SuperPoint model. It consists of a SuperPointEncoder, a SuperPointInterestPointDecoder and a
+    SuperPointDescriptorDecoder. SuperPoint was proposed in `SuperPoint: Self-Supervised Interest Point Detection and
+    Description <https://arxiv.org/abs/1712.07629>`__ by Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. It
+ is a fully convolutional neural network that extracts keypoints and descriptors from an image. It is trained in a
+ self-supervised manner, using a combination of a photometric loss and a loss based on the homographic adaptation of
+ keypoints. It is made of a convolutional encoder and two decoders: one for keypoints and one for descriptors.
+ """
+
+ def __init__(self, config: SuperPointConfig) -> None:
+ super().__init__(config)
+
+ self.config = config
+
+ self.encoder = SuperPointEncoder(config)
+ self.keypoint_decoder = SuperPointInterestPointDecoder(config)
+ self.descriptor_decoder = SuperPointDescriptorDecoder(config)
+
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(SUPERPOINT_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ labels: Optional[torch.LongTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SuperPointKeypointDescriptionOutput]:
+ """
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, SuperPointForKeypointDetection
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
+ >>> model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
+
+ >>> inputs = processor(image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ ```"""
+ loss = None
+ if labels is not None:
+ raise ValueError("SuperPoint does not support training for now.")
+
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ pixel_values = self.extract_one_channel_pixel_values(pixel_values)
+
+ batch_size = pixel_values.shape[0]
+
+ encoder_outputs = self.encoder(
+ pixel_values,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ list_keypoints_scores = [
+ self.keypoint_decoder(last_hidden_state[None, ...]) for last_hidden_state in last_hidden_state
+ ]
+
+ list_keypoints = [keypoints_scores[0] for keypoints_scores in list_keypoints_scores]
+ list_scores = [keypoints_scores[1] for keypoints_scores in list_keypoints_scores]
+
+ list_descriptors = [
+ self.descriptor_decoder(last_hidden_state[None, ...], keypoints[None, ...])
+ for last_hidden_state, keypoints in zip(last_hidden_state, list_keypoints)
+ ]
+
+ maximum_num_keypoints = max(keypoints.shape[0] for keypoints in list_keypoints)
+
+ keypoints = torch.zeros((batch_size, maximum_num_keypoints, 2), device=pixel_values.device)
+ scores = torch.zeros((batch_size, maximum_num_keypoints), device=pixel_values.device)
+ descriptors = torch.zeros(
+ (batch_size, maximum_num_keypoints, self.config.descriptor_decoder_dim),
+ device=pixel_values.device,
+ )
+ mask = torch.zeros((batch_size, maximum_num_keypoints), device=pixel_values.device, dtype=torch.int)
+
+ for i, (_keypoints, _scores, _descriptors) in enumerate(zip(list_keypoints, list_scores, list_descriptors)):
+ keypoints[i, : _keypoints.shape[0]] = _keypoints
+ scores[i, : _scores.shape[0]] = _scores
+ descriptors[i, : _descriptors.shape[0]] = _descriptors
+ mask[i, : _scores.shape[0]] = 1
+
+ hidden_states = encoder_outputs[1] if output_hidden_states else None
+ if not return_dict:
+ return tuple(v for v in [loss, keypoints, scores, descriptors, mask, hidden_states] if v is not None)
+
+ return SuperPointKeypointDescriptionOutput(
+ loss=loss,
+ keypoints=keypoints,
+ scores=scores,
+ descriptors=descriptors,
+ mask=mask,
+ hidden_states=hidden_states,
+ )
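+
+
+# Editor's note: a self-contained restatement of the padding scheme at the end of `forward` above, where per-image
+# keypoint tensors of different lengths are packed into fixed-size batch tensors plus a validity mask. The helper
+# name is illustrative only and not part of the Transformers API.
+def _example_pad_keypoints(list_keypoints) -> Tuple[torch.Tensor, torch.Tensor]:
+    batch_size = len(list_keypoints)
+    max_keypoints = max(keypoints.shape[0] for keypoints in list_keypoints)
+    padded = torch.zeros((batch_size, max_keypoints, 2))
+    mask = torch.zeros((batch_size, max_keypoints), dtype=torch.int)
+    for i, keypoints in enumerate(list_keypoints):
+        padded[i, : keypoints.shape[0]] = keypoints
+        mask[i, : keypoints.shape[0]] = 1
+    return padded, mask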