diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..14cf8bb5879320c3838808bea5715ac06b046fd9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__init__.py
@@ -0,0 +1,71 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
+
+
+_import_structure = {"configuration_bert_generation": ["BertGenerationConfig"]}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_bert_generation"] = ["BertGenerationTokenizer"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_bert_generation"] = [
+ "BertGenerationDecoder",
+ "BertGenerationEncoder",
+ "BertGenerationPreTrainedModel",
+ "load_tf_weights_in_bert_generation",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_bert_generation import BertGenerationConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_bert_generation import BertGenerationTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_bert_generation import (
+ BertGenerationDecoder,
+ BertGenerationEncoder,
+ BertGenerationPreTrainedModel,
+ load_tf_weights_in_bert_generation,
+ )
+
+else:
+ import sys
+
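+    # Replacing this module in sys.modules with a _LazyModule defers the heavy torch and
+    # sentencepiece imports until an attribute (e.g. `BertGenerationEncoder`) is first accessed.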
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe98c2948043d034b9c0c12970270fe6b1a9114f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc8d0274ae7fb539bba65d956d5db67272b70654
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/configuration_bert_generation.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1eab4755faa5ad91cc83d7e8754dd2c8b80fd7b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/modeling_bert_generation.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..474b8c9d5bfbeb2087ab1db85b2beb7d4a4712ca
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/__pycache__/tokenization_bert_generation.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..841aec5c0fb7acc3fb651aa213bf4cf2e1a6a581
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/configuration_bert_generation.py
@@ -0,0 +1,124 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BertGeneration model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+
+
+class BertGenerationConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`BertGenerationPreTrainedModel`]. It is used to
+ instantiate a BertGeneration model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the BertGeneration
+ [google/bert_for_seq_generation_L-24_bbc_encoder](https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50358):
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`BertGeneration`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 2):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 1):
+ End of stream token id.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import BertGenerationConfig, BertGenerationEncoder
+
+ >>> # Initializing a BertGeneration config
+ >>> configuration = BertGenerationConfig()
+
+ >>> # Initializing a model (with random weights) from the config
+ >>> model = BertGenerationEncoder(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "bert-generation"
+
+ def __init__(
+ self,
+ vocab_size=50358,
+ hidden_size=1024,
+ num_hidden_layers=24,
+ num_attention_heads=16,
+ intermediate_size=4096,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ bos_token_id=2,
+ eos_token_id=1,
+ position_embedding_type="absolute",
+ use_cache=True,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7250f6f7b926fc21102007ce34568d9276615f9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/modeling_bert_generation.py
@@ -0,0 +1,1008 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch BERT model specific for generation."""
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_bert_generation import BertGenerationConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google/bert_for_seq_generation_L-24_bbc_encoder"
+_CONFIG_FOR_DOC = "BertGenerationConfig"
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration
+class BertGenerationSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration
+class BertGenerationSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
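+        # reshape (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)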
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+            # reuse the cached cross-attention key/value states
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
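+            # `distance` is the signed query-minus-key position offset; shifting by
+            # `max_position_embeddings - 1` maps it into the valid index range [0, 2 * max_position_embeddings - 2]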
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the BertGenerationModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration
+class BertGenerationAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = BertGenerationSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = BertGenerationSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration
+class BertGenerationIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration
+class BertGenerationOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration
+class BertGenerationLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = BertGenerationAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = BertGenerationAttention(config, position_embedding_type="absolute")
+ self.intermediate = BertGenerationIntermediate(config)
+ self.output = BertGenerationOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->BertGeneration
+class BertEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+def load_tf_weights_in_bert_generation(
+ model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False
+):
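+    """Load weights from a TensorFlow Hub checkpoint at `tf_hub_path` into the PyTorch `model`,
+    matching TF variable names (kernel/gamma/beta, layer indices, encdec blocks) to PyTorch submodules."""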
+ try:
+ import numpy as np
+ import tensorflow.compat.v1 as tf
+ import tensorflow_hub as hub
+ import tensorflow_text # noqa: F401
+
+ tf.disable_eager_execution()
+ except ImportError:
+ logger.error(
+            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_model = hub.Module(tf_hub_path)
+ init = tf.global_variables_initializer()
+ with tf.Session() as sess:
+ init.run()
+ all_variables = tf_model.variable_map
+ keep_track_variables = all_variables.copy()
+ for key in list(all_variables.keys()):
+ if "global" in key:
+ logger.info(f"Skipping {key}...")
+ continue
+ if not is_encoder:
+ model_pointer = getattr(model, model_class)
+ else:
+ model_pointer = model
+ is_embedding = False
+ logger.info(f"Trying to match {key}...")
+ # remove start_string = "module/bert/"
+ sub_layers = key.split("/")[2:]
+ if is_encoder_named_decoder and sub_layers[0] == "encoder":
+ logger.info(f"Skipping encoder layer {key} for decoder")
+ continue
+ if is_encoder and sub_layers[0] == "decoder":
+ logger.info(f"Skipping decoder layer {key} for encoder")
+ continue
+ for i, sub_layer in enumerate(sub_layers):
+ if sub_layer == "embeddings":
+ is_embedding = True
+ elif sub_layer == "LayerNorm":
+ is_embedding = False
+ if "layer" in sub_layer:
+ model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])]
+ elif sub_layer in ["kernel", "gamma"]:
+ model_pointer = model_pointer.weight
+ elif sub_layer == "beta":
+ model_pointer = model_pointer.bias
+ elif sub_layer == "encdec":
+ model_pointer = model_pointer.crossattention.self
+ elif sub_layer == "encdec_output":
+ model_pointer = model_pointer.crossattention.output
+ elif is_encoder_named_decoder and sub_layer == "decoder":
+ model_pointer = model_pointer.encoder
+ else:
+ if sub_layer == "attention" and "encdec" in sub_layers[i + 1]:
+ continue
+ try:
+ model_pointer = getattr(model_pointer, sub_layer)
+ except AttributeError:
+ logger.info(f"Skipping to initialize {key} at {sub_layer}...")
+ raise AttributeError
+
+ array = np.asarray(sess.run(all_variables[key]))
+ if not is_embedding:
+ logger.info(f"Transposing numpy weight of shape {array.shape} for {key}")
+ array = np.transpose(array)
+ else:
+ model_pointer = model_pointer.weight
+
+ if model_pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched")
+ logger.info(f"Initialize PyTorch weight {key}")
+
+ model_pointer.data = torch.from_numpy(array.astype(np.float32))
+ keep_track_variables.pop(key, None)
+
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}")
+ return model
+
+
+class BertGenerationEmbeddings(nn.Module):
+ """Construct the embeddings from word and position embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
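+        # when a cache is used, offset the position ids by its length so that only the newly
+        # fed tokens receive their correct absolute position embeddings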
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ position_embeddings = self.position_embeddings(position_ids)
+
+ embeddings = inputs_embeds + position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class BertGenerationPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BertGenerationConfig
+ base_model_prefix = "bert"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+BERT_GENERATION_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`BertGenerationConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BERT_GENERATION_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.",
+ BERT_GENERATION_START_DOCSTRING,
+)
+class BertGenerationEncoder(BertGenerationPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ This model should be used when leveraging Bert or Roberta checkpoints for the [`EncoderDecoderModel`] class as
+ described in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461)
+ by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.
+
+    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
+    and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward
+    pass.
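+
+    Example (a minimal sketch of the Seq2Seq usage described above; `EncoderDecoderModel.from_encoder_decoder_pretrained`
+    takes care of setting `is_decoder=True` and `add_cross_attention=True` on the decoder):
+
+    ```python
+    >>> from transformers import EncoderDecoderModel
+
+    >>> # reuse the same BertGeneration checkpoint for both halves of the Seq2Seq model
+    >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
+    ...     "google/bert_for_seq_generation_L-24_bbc_encoder", "google/bert_for_seq_generation_L-24_bbc_encoder"
+    ... )
+    ```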
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = BertGenerationEmbeddings(config)
+ self.encoder = BertEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: `1` for
+ tokens that are NOT MASKED, `0` for MASKED tokens.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+        # past_key_values_length: number of tokens already cached (dim 2 of the cached keys), 0 on the first pass
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask = None
+ if not use_cache:
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+class BertGenerationOnlyLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ logits = self.decoder(hidden_states)
+ return logits
+
+ def _tie_weights(self):
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
+ self.bias = self.decoder.bias
+
+
+@add_start_docstrings(
+ """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.""",
+ BERT_GENERATION_START_DOCSTRING,
+)
+class BertGenerationDecoder(BertGenerationPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if not config.is_decoder:
+            logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True`.")
+
+ self.bert = BertGenerationEncoder(config)
+ self.lm_head = BertGenerationOnlyLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            `[-100, 0, ..., config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100`
+            are ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BertGenerationDecoder, BertGenerationConfig
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
+ >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
+ >>> config.is_decoder = True
+ >>> model = BertGenerationDecoder.from_pretrained(
+ ... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config
+ ... )
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> prediction_logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.bert(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ lm_loss = None
+ if labels is not None:
+ # we are doing next-token prediction; shift prediction scores and input ids by one
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+ labels = labels[:, 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[1:]
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=lm_loss,
+ logits=prediction_scores,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
+ input_shape = input_ids.shape
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_shape)
+
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
+
+ def _reorder_cache(self, past_key_values, beam_idx):
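+        # beam search reorders hypotheses between steps: select, for every layer, the cached
+        # key/value states of the beams that survived (`beam_idx` indexes the batch dimension)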
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b6298fcbd8f6e054f7fac417095b188b070f472
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_generation/tokenization_bert_generation.py
@@ -0,0 +1,185 @@
+# coding=utf-8
+# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization class for model BertGeneration."""
+
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "bert_for_seq_generation": (
+ "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
+ ),
+ }
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
+
+
+class BertGenerationTokenizer(PreTrainedTokenizer):
+ """
+ Construct a BertGeneration tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning-of-sequence token.
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end-of-sequence token.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
+            this token instead.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+ sep_token (`str`, *optional*, defaults to `"<::::>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
+                using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
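+
+    Example (a minimal sketch; the checkpoint name is the one referenced in the vocab map above, and the
+    `sp_model_kwargs` values are purely illustrative):
+
+    ```python
+    >>> from transformers import BertGenerationTokenizer
+
+    >>> tokenizer = BertGenerationTokenizer.from_pretrained(
+    ...     "google/bert_for_seq_generation_L-24_bbc_encoder",
+    ...     sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
+    ... )
+    >>> ids = tokenizer("Hello world!")["input_ids"]
+    ```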
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ prefix_tokens: List[int] = []
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="",
+ eos_token="",
+ unk_token="",
+ pad_token="",
+ sep_token="<::::>",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.vocab_file = vocab_file
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+        # Pass the special tokens and the SentencePiece kwargs on to the superclass
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ sep_token=sep_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return self.sp_model.get_piece_size()
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
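+    # The underlying SentencePieceProcessor is a C-extension object and cannot be
+    # pickled: `__getstate__` drops it and `__setstate__` rebuilds it from
+    # `self.vocab_file` and `self.sp_model_kwargs`.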
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ for token in tokens:
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string.strip()
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3c99f58e0cdbdb1b1a3e6b81a3babcc2ef5c8e2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50f96a8fad704d137ceb5677fe4da06b5d599478
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/convert_bort_original_gluonnlp_checkpoint_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/convert_bort_original_gluonnlp_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32bd6d15a029e80ee984743494c96638049695b1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/__pycache__/convert_bort_original_gluonnlp_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..5dc9a244c43c78c58e5b1076cccb82847193301b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py
@@ -0,0 +1,319 @@
+# coding=utf-8
+# Copyright 2020, The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Bort checkpoint."""
+
+
+import argparse
+import os
+
+import gluonnlp as nlp
+import mxnet as mx
+import numpy as np
+import torch
+from gluonnlp.base import get_home_dir
+from gluonnlp.model.bert import BERTEncoder
+from gluonnlp.model.utils import _load_vocab
+from gluonnlp.vocab import Vocab
+from packaging import version
+from torch import nn
+
+from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
+from transformers.models.bert.modeling_bert import (
+ BertIntermediate,
+ BertLayer,
+ BertOutput,
+ BertSelfAttention,
+ BertSelfOutput,
+)
+from transformers.utils import logging
+
+
+if version.parse(nlp.__version__) != version.parse("0.8.3"):
+ raise Exception("requires gluonnlp == 0.8.3")
+
+if version.parse(mx.__version__) != version.parse("1.5.0"):
+ raise Exception("requires mxnet == 1.5.0")
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
+
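+# Usage sketch (paths are placeholders, not files shipped with this script):
+#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
+#       --bort_checkpoint_path ./bort.params --pytorch_dump_folder_path ./bort-pytorch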
+
+def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
+ """
+    Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure.
+ """
+
+ # Original Bort configuration
+ bort_4_8_768_1024_hparams = {
+ "attention_cell": "multi_head",
+ "num_layers": 4,
+ "units": 1024,
+ "hidden_size": 768,
+ "max_length": 512,
+ "num_heads": 8,
+ "scaled": True,
+ "dropout": 0.1,
+ "use_residual": True,
+ "embed_size": 1024,
+ "embed_dropout": 0.1,
+ "word_embed": None,
+ "layer_norm_eps": 1e-5,
+ "token_type_vocab_size": 2,
+ }
+
+ predefined_args = bort_4_8_768_1024_hparams
+
+ # Let's construct the original Bort model here
+ # Taken from official BERT implementation, see:
+ # https://github.com/alexa/bort/blob/master/bort/bort.py
+ encoder = BERTEncoder(
+ attention_cell=predefined_args["attention_cell"],
+ num_layers=predefined_args["num_layers"],
+ units=predefined_args["units"],
+ hidden_size=predefined_args["hidden_size"],
+ max_length=predefined_args["max_length"],
+ num_heads=predefined_args["num_heads"],
+ scaled=predefined_args["scaled"],
+ dropout=predefined_args["dropout"],
+ output_attention=False,
+ output_all_encodings=False,
+ use_residual=predefined_args["use_residual"],
+ activation=predefined_args.get("activation", "gelu"),
+ layer_norm_eps=predefined_args.get("layer_norm_eps", None),
+ )
+
+ # Vocab information needs to be fetched first
+ # It's the same as RoBERTa, so RobertaTokenizer can be used later
+ vocab_name = "openwebtext_ccnews_stories_books_cased"
+
+ # Specify download folder to Gluonnlp's vocab
+ gluon_cache_dir = os.path.join(get_home_dir(), "models")
+ bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
+
+ original_bort = nlp.model.BERTModel(
+ encoder,
+ len(bort_vocab),
+ units=predefined_args["units"],
+ embed_size=predefined_args["embed_size"],
+ embed_dropout=predefined_args["embed_dropout"],
+ word_embed=predefined_args["word_embed"],
+ use_pooler=False,
+ use_token_type_embed=False,
+ token_type_vocab_size=predefined_args["token_type_vocab_size"],
+ use_classifier=False,
+ use_decoder=False,
+ )
+
+ original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
+ params = original_bort._collect_params_with_prefix()
+
+ # Build our config 🤗
+ hf_bort_config_json = {
+ "architectures": ["BertForMaskedLM"],
+ "attention_probs_dropout_prob": predefined_args["dropout"],
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": predefined_args["dropout"],
+ "hidden_size": predefined_args["embed_size"],
+ "initializer_range": 0.02,
+ "intermediate_size": predefined_args["hidden_size"],
+ "layer_norm_eps": predefined_args["layer_norm_eps"],
+ "max_position_embeddings": predefined_args["max_length"],
+ "model_type": "bort",
+ "num_attention_heads": predefined_args["num_heads"],
+ "num_hidden_layers": predefined_args["num_layers"],
+ "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
+ "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
+ "vocab_size": len(bort_vocab),
+ }
+
+ hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
+ hf_bort_model = BertForMaskedLM(hf_bort_config)
+ hf_bort_model.eval()
+
+ # Parameter mapping table (Gluonnlp to Transformers)
+ # * denotes layer index
+ #
+ # | Gluon Parameter | Transformers Parameter
+ # | -------------------------------------------------------------- | ----------------------
+ # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
+ # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
+ # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
+ # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
+ # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
+ # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
+ # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
+ # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
+ # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
+ # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
+ # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
+ # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
+ # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
+ # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
+ # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
+ # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
+ # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
+ # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
+ # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
+ # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
+
+ # Helper function to convert MXNET Arrays to PyTorch
+ def to_torch(mx_array) -> nn.Parameter:
+ return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
+
+ # Check param shapes and map new HF param back
+    def check_and_map_params(hf_param, gluon_param):
+        shape_hf = hf_param.shape
+
+        # Keep the parameter name separate from the converted tensor so the assert
+        # message below reports the name, not the tensor contents
+        gluon_tensor = to_torch(params[gluon_param])
+        shape_gluon = gluon_tensor.shape
+
+        assert (
+            shape_hf == shape_gluon
+        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
+
+        return gluon_tensor
+
+ hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
+ hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
+ )
+ hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
+ hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
+ )
+ hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
+ hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
+ )
+ hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
+ hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
+ )
+
+ # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
+ hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
+ hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
+ )
+
+ for i in range(hf_bort_config.num_hidden_layers):
+ layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
+
+ # self attention
+ self_attn: BertSelfAttention = layer.attention.self
+
+ self_attn.key.bias.data = check_and_map_params(
+ self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
+ )
+
+ self_attn.key.weight.data = check_and_map_params(
+ self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
+ )
+ self_attn.query.bias.data = check_and_map_params(
+ self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
+ )
+ self_attn.query.weight.data = check_and_map_params(
+ self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
+ )
+ self_attn.value.bias.data = check_and_map_params(
+ self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
+ )
+ self_attn.value.weight.data = check_and_map_params(
+ self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
+ )
+
+ # self attention output
+ self_output: BertSelfOutput = layer.attention.output
+
+ self_output.dense.bias = check_and_map_params(
+ self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
+ )
+ self_output.dense.weight = check_and_map_params(
+ self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
+ )
+ self_output.LayerNorm.bias = check_and_map_params(
+ self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
+ )
+ self_output.LayerNorm.weight = check_and_map_params(
+ self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
+ )
+
+ # intermediate
+ intermediate: BertIntermediate = layer.intermediate
+
+ intermediate.dense.bias = check_and_map_params(
+ intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
+ )
+ intermediate.dense.weight = check_and_map_params(
+ intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
+ )
+
+ # output
+ bert_output: BertOutput = layer.output
+
+ bert_output.dense.bias = check_and_map_params(
+ bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
+ )
+ bert_output.dense.weight = check_and_map_params(
+ bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
+ )
+ bert_output.LayerNorm.bias = check_and_map_params(
+ bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
+ )
+ bert_output.LayerNorm.weight = check_and_map_params(
+ bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
+ )
+
+ # Save space and energy 🎄
+ hf_bort_model.half()
+
+ # Compare output of both models
+ tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-base")
+
+ input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]
+
+ # Get gluon output
+ gluon_input_ids = mx.nd.array([input_ids])
+ output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
+
+ # Get Transformer output (save and reload model again)
+ hf_bort_model.save_pretrained(pytorch_dump_folder_path)
+ hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
+ hf_bort_model.eval()
+
+ input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
+ output_hf = hf_bort_model(**input_ids)[0]
+
+ gluon_layer = output_gluon[0].asnumpy()
+ hf_layer = output_hf[0].detach().numpy()
+
+ max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
+ success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
+
+ if success:
+ print("✔️ Both model do output the same tensors")
+ else:
+ print("❌ Both model do **NOT** output the same tensors")
+ print("Absolute difference is:", max_absolute_diff)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e467090cb4fbfa55ec51ec8232a54180c532ad6c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py
@@ -0,0 +1,45 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
+
+
+if TYPE_CHECKING:
+ from .configuration_mmbt import MMBTConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36b0cbbb7dc612dfba13ca633c88a25d78f5e768
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47cfd983f149f0c8b2d1625a3bf013899f03d871
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b72bec4d80ae501407aee763c2abbff1264e45de
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py
new file mode 100644
index 0000000000000000000000000000000000000000..df5161b0927ad26279a273216d1d9ab6d465063a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Copyright (c) HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MMBT configuration"""
+
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MMBTConfig(object):
+ """
+ This is the configuration class to store the configuration of a [`MMBTModel`]. It is used to instantiate a MMBT
+ model according to the specified arguments, defining the model architecture.
+
+ Args:
+ config ([`PreTrainedConfig`]):
+ Config of the underlying Transformer models. Its values are copied over to use a single config.
+ num_labels (`int`, *optional*):
+ Size of final Linear layer for classification.
+ modal_hidden_size (`int`, *optional*, defaults to 2048):
+ Embedding dimension of the non-text modality encoder.
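+
+    Example (a minimal sketch; `BertConfig` stands in here for any underlying transformer config):
+
+    ```python
+    from transformers import BertConfig
+
+    base_config = BertConfig()
+    mmbt_config = MMBTConfig(base_config, num_labels=2, modal_hidden_size=2048)
+    ```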
+ """
+
+ def __init__(self, config, num_labels=None, modal_hidden_size=2048):
+ self.__dict__ = config.__dict__
+ self.modal_hidden_size = modal_hidden_size
+ if num_labels:
+ self.num_labels = num_labels
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dc450ce8f6c13346f30e7da045a927a1186e089
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py
@@ -0,0 +1,408 @@
+# coding=utf-8
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Copyright (c) HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch MMBT model."""
+
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss, MSELoss
+
+from ....modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
+from ....modeling_utils import ModuleUtilsMixin
+from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "MMBTConfig"
+
+
+class ModalEmbeddings(nn.Module):
+ """Generic Modal Embeddings which takes in an encoder, and a transformer embedding."""
+
+ def __init__(self, config, encoder, embeddings):
+ super().__init__()
+ self.config = config
+ self.encoder = encoder
+ self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
+ self.position_embeddings = embeddings.position_embeddings
+ self.token_type_embeddings = embeddings.token_type_embeddings
+ self.word_embeddings = embeddings.word_embeddings
+ self.LayerNorm = embeddings.LayerNorm
+ self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
+
+ def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
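+        # Run the non-text inputs through the modality encoder and project the result
+        # into the transformer's hidden size so it can be combined with the position
+        # and token-type embeddings below.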
+ token_embeddings = self.proj_embeddings(self.encoder(input_modal))
+ seq_length = token_embeddings.size(1)
+
+ if start_token is not None:
+ start_token_embeds = self.word_embeddings(start_token)
+ seq_length += 1
+ token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
+
+ if end_token is not None:
+ end_token_embeds = self.word_embeddings(end_token)
+ seq_length += 1
+ token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)
+
+ if position_ids is None:
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)
+ position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(
+ (input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device
+ )
+
+ position_embeddings = self.position_embeddings(position_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+ embeddings = token_embeddings + position_embeddings + token_type_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+MMBT_START_DOCSTRING = r"""
+ MMBT model was proposed in [Supervised Multimodal Bitransformers for Classifying Images and
+ Text](https://github.com/facebookresearch/mmbt) by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
+    It is a supervised multimodal bitransformer model that fuses information from text and other modality encoders
+    (such as image encoders) and obtains state-of-the-art performance on various multimodal classification benchmark
+    tasks.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`MMBTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration.
+ transformer (`nn.Module`): A text transformer that is used by MMBT.
+ It should have embeddings, encoder, and pooler attributes.
+ encoder (`nn.Module`): Encoder for the second modality.
+ It should take in a batch of modal inputs and return k, n dimension embeddings.
+"""
+
+MMBT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_modal (`torch.FloatTensor` of shape `(batch_size, ***)`):
+            The other modality data. It will have the shape that the encoder for that modality expects, e.g. with an
+            image encoder the shape would be `(batch_size, channels, height, width)`.
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary. The [CLS] token should not be added here, as it is
+            appended to the end of the other modality embeddings. Indices can be obtained using [`AutoTokenizer`]. See
+            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ modal_start_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Optional start token to be added to the other modality embedding; [CLS] is most commonly used for
+            classification tasks.
+        modal_end_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Optional end token to be added to the other modality embedding; [SEP] is most commonly used.
+        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+        modal_token_type_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*):
+ Segment token indices to indicate different portions of the non-text modality. The embeddings from these
+ tokens will be summed with the respective token embeddings for the non-text modality.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ modal_position_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.
+ Selected in the range `[0, config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, embedding_dim)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare MMBT Model outputting raw hidden-states without any specific head on top.",
+ MMBT_START_DOCSTRING,
+)
+class MMBTModel(nn.Module, ModuleUtilsMixin):
+ def __init__(self, config, transformer, encoder):
+ super().__init__()
+ self.config = config
+ self.transformer = transformer
+ self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)
+
+ @add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_modal,
+ input_ids=None,
+ modal_start_tokens=None,
+ modal_end_tokens=None,
+ attention_mask=None,
+ token_type_ids=None,
+ modal_token_type_ids=None,
+ position_ids=None,
+ modal_position_ids=None,
+ head_mask=None,
+ inputs_embeds=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ # For example purposes. Not runnable.
+ transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
+ encoder = ImageEncoder(args)
+ mmbt = MMBTModel(config, transformer, encoder)
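+        # Hypothetical inputs: `image_tensor` is whatever `encoder` expects, e.g.
+        # (batch_size, channels, height, width); `input_ids` comes from a tokenizer.
+        outputs = mmbt(input_modal=image_tensor, input_ids=input_ids)
+        last_hidden_state = outputs.last_hidden_state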
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_txt_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_txt_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ modal_embeddings = self.modal_encoder(
+ input_modal,
+ start_token=modal_start_tokens,
+ end_token=modal_end_tokens,
+ position_ids=modal_position_ids,
+ token_type_ids=modal_token_type_ids,
+ )
+
+ input_modal_shape = modal_embeddings.size()[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)
+
+ txt_embeddings = self.transformer.embeddings(
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
+ )
+
+ embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
+
+ input_shape = embedding_output.size()[:-1]
+
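+        # The modal embeddings were prepended to the text embeddings above, so any
+        # user-provided masks must be extended with ones over the modal positions.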
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ else:
+ attention_mask = torch.cat(
+ [torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1
+ )
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(input_shape, device=device)
+ else:
+ encoder_attention_mask = torch.cat(
+ [torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1
+ )
+
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ encoder_outputs = self.transformer.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.transformer.pooler(sequence_output)
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+    def get_input_embeddings(self):
+        # The word embeddings live on the wrapped text transformer, not on this module
+        return self.transformer.embeddings.word_embeddings
+
+    def set_input_embeddings(self, value):
+        self.transformer.embeddings.word_embeddings = value
+
+
+@add_start_docstrings(
+ """
+ MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
+ """,
+ MMBT_START_DOCSTRING,
+ MMBT_INPUTS_DOCSTRING,
+)
+class MMBTForClassification(nn.Module):
+ r"""
+ **labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`:
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+    Returns:
+        *Tuple* comprising various elements, depending on the configuration (config) and inputs:
+
+        - **loss** (*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`:
+          Classification (or regression if `config.num_labels == 1`) loss.
+        - **logits** `torch.FloatTensor` of shape `(batch_size, config.num_labels)`:
+          Classification (or regression if `config.num_labels == 1`) scores (before SoftMax).
+        - **hidden_states** (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one
+          for the output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length,
+          hidden_size)`: Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+        - **attentions** (*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for
+          each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`: Attention weights after
+          the attention softmax, used to compute the weighted average in the self-attention heads.
+
+ Examples:
+
+ ```python
+ # For example purposes. Not runnable.
+ transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
+ encoder = ImageEncoder(args)
+ model = MMBTForClassification(config, transformer, encoder)
+ outputs = model(input_modal, input_ids, labels=labels)
+ loss, logits = outputs[:2]
+ ```"""
+
+    def __init__(self, config, transformer, encoder):
+        super().__init__()
+        self.config = config  # kept so that `forward` can read `self.config.use_return_dict`
+        self.num_labels = config.num_labels
+
+ self.mmbt = MMBTModel(config, transformer, encoder)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ def forward(
+ self,
+ input_modal,
+ input_ids=None,
+ modal_start_tokens=None,
+ modal_end_tokens=None,
+ attention_mask=None,
+ token_type_ids=None,
+ modal_token_type_ids=None,
+ position_ids=None,
+ modal_position_ids=None,
+ head_mask=None,
+ inputs_embeds=None,
+ labels=None,
+ return_dict=None,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mmbt(
+ input_modal=input_modal,
+ input_ids=input_ids,
+ modal_start_tokens=modal_start_tokens,
+ modal_end_tokens=modal_end_tokens,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ modal_token_type_ids=modal_token_type_ids,
+ position_ids=position_ids,
+ modal_position_ids=modal_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.num_labels == 1:
+ # We are doing regression
+ loss_fct = MSELoss()
+ loss = loss_fct(logits.view(-1), labels.view(-1))
+ else:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..446c9f076d31347c496300f432908d56895f7e67
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__init__.py
@@ -0,0 +1,95 @@
+# Copyright 2023 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ....utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_open_llama": ["OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenLlamaConfig"],
+}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_open_llama"] = ["LlamaTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_open_llama_fast"] = ["LlamaTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_open_llama"] = [
+ "OpenLlamaForCausalLM",
+ "OpenLlamaModel",
+ "OpenLlamaPreTrainedModel",
+ "OpenLlamaForSequenceClassification",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_open_llama import OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenLlamaConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from transformers import LlamaTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from transformers import LlamaTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_open_llama import (
+ OpenLlamaForCausalLM,
+ OpenLlamaForSequenceClassification,
+ OpenLlamaModel,
+ OpenLlamaPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb26271ad6dfb2a41a8edf9565979e183197f3a8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7fa1e8c521cbd38c7bb2a0dec67326a4a819ca09
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf82c02a997c4bb1eb2aeb0e7ec42352dde59e7d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..5786abac850dd379e96f3725a63ecd39a1a3947a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py
@@ -0,0 +1,168 @@
+# coding=utf-8
+# Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Open-Llama model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
+}
+
+
+class OpenLlamaConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`OpenLlamaModel`]. It is used to instantiate an
+ Open-Llama model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the
+ [s-JoL/Open-Llama-V1](https://huggingface.co/s-JoL/Open-Llama-V1).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+        vocab_size (`int`, *optional*, defaults to 100000):
+            Vocabulary size of the Open-Llama model. Defines the number of different tokens that can be represented by
+            the `input_ids` passed when calling [`OpenLlamaModel`].
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 11008):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie the input and output word embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+ these scaling strategies behave:
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+ experimental feature, subject to breaking API changes in future versions.
+
+ Example:
+
+ ```python
+ >>> from transformers import OpenLlamaModel, OpenLlamaConfig
+
+    >>> # Initializing an Open-Llama open_llama-7b style configuration
+ >>> configuration = OpenLlamaConfig()
+
+ >>> # Initializing a model from the open_llama-7b style configuration
+ >>> model = OpenLlamaModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
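+
+    >>> # Hypothetical: enabling linear RoPE scaling (the factor must be a float > 1)
+    >>> configuration = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})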
+ ```"""
+
+ model_type = "open-llama"
+
+ def __init__(
+ self,
+ vocab_size=100000,
+ hidden_size=4096,
+ intermediate_size=11008,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ hidden_act="silu",
+ max_position_embeddings=2048,
+ initializer_range=0.02,
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ tie_word_embeddings=False,
+ use_memory_efficient_attention=True,
+ hidden_dropout_prob=0.1,
+ attention_dropout_prob=0.1,
+ use_stable_embedding=True,
+ shared_input_output_embedding=True,
+ rope_scaling=None,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
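+        # Note: the misspelled "use_memorry_efficient_attention" kwarg is accepted on
+        # purpose, for backward compatibility with configs saved under the old typo.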
+ self.use_memory_efficient_attention = kwargs.pop(
+ "use_memorry_efficient_attention", use_memory_efficient_attention
+ )
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_dropout_prob = attention_dropout_prob
+ self.use_stable_embedding = use_stable_embedding
+ self.shared_input_output_embedding = shared_input_output_embedding
+ self.rope_scaling = rope_scaling
+ self._rope_scaling_validation()
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
+ f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/modeling_open_llama.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/modeling_open_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..71c42447cd2bbe25827455ad76d05695f75ab532
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/modeling_open_llama.py
@@ -0,0 +1,968 @@
+# coding=utf-8
+# Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Open-Llama model."""
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ....activations import ACT2FN
+from ....modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
+from ....modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+from ....modeling_utils import PreTrainedModel
+from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_open_llama import OpenLlamaConfig
+
+
+logger = logging.get_logger(__name__)
+
+try:
+ from xformers import ops as xops
+except ImportError:
+ xops = None
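+# xformers is optional: `xops` stays `None` when it is not installed, and the
+# memory-efficient attention path is then unavailable.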
+
+
+_CONFIG_FOR_DOC = "OpenLlamaConfig"
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->OpenLlama
+class OpenLlamaRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ OpenLlamaRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
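+        # RMSNorm, computed in float32 for stability and cast back at the end:
+        # y = weight * x / sqrt(mean(x^2, dim=-1) + eps)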
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
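+
+# Editor's illustration (not part of the upstream file): RMSNorm rescales each hidden
+# vector by the reciprocal root-mean-square of its elements, with no mean subtraction
+# or bias. Ignoring the small `variance_epsilon`, the forward above computes:
+#
+#     x = torch.tensor([3.0, 4.0])                    # mean of squares = 12.5
+#     rms = x.pow(2).mean(-1, keepdim=True).sqrt()    # 3.5355
+#     normed = x / rms                                # tensor([0.8485, 1.1314])
+#
+# followed by an elementwise multiplication with the learned `weight`.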
+
+
+# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->OpenLlama
+class OpenLlamaRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+ )
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+ def forward(self, x, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if seq_len > self.max_seq_len_cached:
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+ return (
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
+ )
+
+
+# Copied from transformers.models.falcon.modeling_falcon.FalconLinearScalingRotaryEmbedding with Falcon->OpenLlama
+class OpenLlamaLinearScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
+ """OpenLlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ self.scaling_factor = scaling_factor
+ super().__init__(dim, max_position_embeddings, base, device)
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+ t = t / self.scaling_factor
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+# Copied from transformers.models.falcon.modeling_falcon.FalconDynamicNTKScalingRotaryEmbedding with Falcon->OpenLlama
+class OpenLlamaDynamicNTKScalingRotaryEmbedding(OpenLlamaRotaryEmbedding):
+ """OpenLlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ self.scaling_factor = scaling_factor
+ super().__init__(dim, max_position_embeddings, base, device)
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+
+ if seq_len > self.max_position_embeddings:
+ base = self.base * (
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
+ ) ** (self.dim / (self.dim - 2))
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
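+
+# Editor's illustration (not part of the upstream file):
+#
+#     x = torch.arange(4.0)   # tensor([0., 1., 2., 3.])
+#     rotate_half(x)          # tensor([-2., -3., 0., 1.])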
+
+
+# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`):
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+ used to pass offsetted position ids when working with a KV-cache.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
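+
+# Editor's illustration (not part of the upstream file): with q and k of shape
+# [batch, heads, seq, head_dim] and cos/sin caches of shape [seq, head_dim], a
+# typical call looks like
+#
+#     position_ids = torch.arange(seq_len).unsqueeze(0)            # [1, seq]
+#     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
+#
+# cos[position_ids] has shape [1, seq, head_dim]; the default unsqueeze_dim=1 inserts
+# the heads axis so it broadcasts against [batch, heads, seq, head_dim].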
+
+
+class OpenLlamaMLP(nn.Module):
+ def __init__(
+ self,
+ hidden_size: int,
+ intermediate_size: int,
+ hidden_act: str,
+ dropout_prob: float,
+ ):
+ super().__init__()
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+ self.act_fn = ACT2FN[hidden_act]
+ self.dropout = nn.Dropout(dropout_prob)
+
+ def forward(self, x):
+ out = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+ return self.dropout(out)
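+
+# Editor's illustration (not part of the upstream file): this is the gated
+# (SwiGLU-style) feed-forward block used across Llama-family models, with dropout
+# added on the output:
+#
+#     gate   = act_fn(gate_proj(x))         # [*, intermediate_size]
+#     hidden = gate * up_proj(x)            # elementwise gating
+#     out    = dropout(down_proj(hidden))   # back to [*, hidden_size]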
+
+
+class OpenLlamaAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: OpenLlamaConfig):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.max_position_embeddings = config.max_position_embeddings
+        self.dropout_prob = config.attention_dropout_prob
+        # `_init_rope` below references `self.rope_theta`; fall back to the standard
+        # RoPE base of 10000 when the config does not define one, so that attention
+        # construction does not fail with an AttributeError.
+        self.rope_theta = getattr(config, "rope_theta", 10000)
+
+ if (self.head_dim * self.num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+ self._init_rope()
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaAttention._init_rope with Llama->OpenLlama
+ def _init_rope(self):
+ if self.config.rope_scaling is None:
+ self.rotary_emb = OpenLlamaRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+ else:
+ scaling_type = self.config.rope_scaling["type"]
+ scaling_factor = self.config.rope_scaling["factor"]
+ if scaling_type == "linear":
+ self.rotary_emb = OpenLlamaLinearScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ elif scaling_type == "dynamic":
+ self.rotary_emb = OpenLlamaDynamicNTKScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ else:
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value[0].shape[-2]
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+ # [bsz, nh, t, hd]
+
+ if past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+ past_key_value = (key_states, value_states) if use_cache else None
+
+ if self.config.use_memory_efficient_attention and xops is not None and self.training:
+ attn_weights = None
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+ attn_output = xops.memory_efficient_attention(
+ query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask(), p=self.dropout_prob
+ )
+ else:
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights + attention_mask
+ attn_weights = torch.max(
+ attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)
+ )
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+class OpenLlamaDecoderLayer(nn.Module):
+ def __init__(self, config: OpenLlamaConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.self_attn = OpenLlamaAttention(config=config)
+ self.mlp = OpenLlamaMLP(
+ hidden_size=self.hidden_size,
+ intermediate_size=config.intermediate_size,
+ hidden_act=config.hidden_act,
+ dropout_prob=config.hidden_dropout_prob,
+ )
+ self.input_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+OPEN_LLAMA_START_DOCSTRING = r"""
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+ Parameters:
+ config ([`OpenLlamaConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare Open-Llama Model outputting raw hidden-states without any specific head on top.",
+ OPEN_LLAMA_START_DOCSTRING,
+)
+class OpenLlamaPreTrainedModel(PreTrainedModel):
+ config_class = OpenLlamaConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["OpenLlamaDecoderLayer"]
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ if self.config.use_stable_embedding:
+ torch.nn.init.xavier_normal_(module.weight.data)
+ else:
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+OPEN_LLAMA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+            information on the default strategy.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Open-Llama Model outputting raw hidden-states without any specific head on top.",
+ OPEN_LLAMA_START_DOCSTRING,
+)
+class OpenLlamaModel(OpenLlamaPreTrainedModel):
+ """
+    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is an [`OpenLlamaDecoderLayer`].
+
+ Args:
+ config: OpenLlamaConfig
+ """
+
+ def __init__(self, config: OpenLlamaConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ if config.use_stable_embedding:
+ self.embed_layer_norm = nn.LayerNorm(config.hidden_size)
+ else:
+ self.embed_layer_norm = None
+ self.layers = nn.ModuleList([OpenLlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.norm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ seq_length_with_past = seq_length
+ past_key_values_length = 0
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ if past_key_values is not None:
+ past_key_values_length = past_key_values[0][0].shape[2]
+ seq_length_with_past = seq_length_with_past + past_key_values_length
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+ if self.embed_layer_norm:
+ inputs_embeds = self.embed_layer_norm(inputs_embeds)
+ # embed positions
+ if self.config.use_memory_efficient_attention and self.training:
+ attention_mask = None
+ elif attention_mask is None:
+ attention_mask = torch.ones(
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+ )
+
+ input_shape = (batch_size, seq_length)
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = () if use_cache else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ None,
+ output_attentions,
+ None,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+
+class OpenLlamaForCausalLM(OpenLlamaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = OpenLlamaModel(config)
+ if config.shared_input_output_embedding:
+ self.lm_head = None
+ else:
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, OpenLlamaForCausalLM
+
+ >>> model = OpenLlamaForCausalLM.from_pretrained("openlm-research/open_llama_7b")
+ >>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ if self.config.shared_input_output_embedding:
+ logits = torch.einsum(
+ "blh,vh->blv", hidden_states.to(self.model.embed_tokens.weight.device), self.model.embed_tokens.weight
+ )
+ else:
+ logits = self.lm_head(hidden_states)
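+        # Editor's note (not part of the upstream file): with tied embeddings, the
+        # einsum branch above is equivalent to reusing the input embedding matrix as
+        # the output head:
+        #
+        #     logits = hidden_states @ self.model.embed_tokens.weight.t()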
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+ ):
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
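+
+    # Editor's illustration (not part of the upstream file): during beam search the
+    # generation loop calls `_reorder_cache` with the indices of the beams that were
+    # kept at this step, e.g.
+    #
+    #     beam_idx = torch.tensor([2, 0, 0, 1])   # beam 2 becomes beam 0, etc.
+    #
+    # and every cached key/value tensor is gathered along its batch dimension.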
+
+
+@add_start_docstrings(
+ """
+    The Open-Llama Model transformer with a sequence classification head on top (linear layer).
+
+    [`OpenLlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal
+    models (e.g. GPT-2) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. The same fallback applies
+    when `inputs_embeds` are passed instead of `input_ids`, since the model cannot guess the padding tokens in that
+    case.
+ """,
+ OPEN_LLAMA_START_DOCSTRING,
+)
+class OpenLlamaForSequenceClassification(OpenLlamaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = OpenLlamaModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
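+        # Editor's illustration (not part of the upstream file): the gather above picks
+        # one logit vector per sequence. With right padding and pad_token_id=0,
+        #
+        #     input_ids = [[5, 7, 9, 0, 0]]
+        #
+        # `argmax` over `input_ids == 0` returns 3, minus 1 gives index 2, i.e. the last
+        # real token; the modulo only matters when no pad token is present, in which
+        # case the last position of the row is used.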
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dba5e14594e16c19fc1a269a92e968fec35afc26
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py
@@ -0,0 +1,73 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig"],
+ "tokenization_retribert": ["RetriBertTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_retribert_fast"] = ["RetriBertTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_retribert"] = [
+ "RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "RetriBertModel",
+ "RetriBertPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig
+ from .tokenization_retribert import RetriBertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_retribert_fast import RetriBertTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_retribert import (
+ RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ RetriBertModel,
+ RetriBertPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..602396b634e499f978017f3f6ffe0696c35eb779
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e33128e35d93bd4814f0488f536c8516b9b0c5a7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8705498770fa70958d5bed5bdbe2910d646f4860
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d035c3d82fd0fe3936683b29c4d6020e8d2548b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py
new file mode 100644
index 0000000000000000000000000000000000000000..3861b9c90f33ef385526ef256123721adc993116
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py
@@ -0,0 +1,112 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" RetriBERT model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+# TODO: upload to AWS
+RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "yjernite/retribert-base-uncased": (
+ "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
+ ),
+}
+
+
+class RetriBertConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a
+ RetriBertModel model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the RetriBERT
+ [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`RetriBertModel`]
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 8):
+            Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the *token_type_ids* passed into [`BertModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ share_encoders (`bool`, *optional*, defaults to `True`):
+            Whether or not to use the same Bert-type encoder for the queries and the documents.
+        projection_dim (`int`, *optional*, defaults to 128):
+            Final dimension of the query and document representations after projection.
+ """
+
+ model_type = "retribert"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=8,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ share_encoders=True,
+ projection_dim=128,
+ pad_token_id=0,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.share_encoders = share_encoders
+ self.projection_dim = projection_dim
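+
+    # Editor's illustration (not part of the upstream file): instantiating the default
+    # configuration and building a randomly initialized model from it:
+    #
+    #     from transformers import RetriBertConfig, RetriBertModel
+    #
+    #     config = RetriBertConfig(projection_dim=128, share_encoders=True)
+    #     model = RetriBertModel(config)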
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py
new file mode 100644
index 0000000000000000000000000000000000000000..00d47bce5121d4fafd81ee3fe88b408e87ec8e40
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py
@@ -0,0 +1,220 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+RetriBERT model
+"""
+
+
+import math
+from typing import Optional
+
+import torch
+import torch.utils.checkpoint as checkpoint
+from torch import nn
+
+from ....modeling_utils import PreTrainedModel
+from ....utils import add_start_docstrings, logging
+from ...bert.modeling_bert import BertModel
+from .configuration_retribert import RetriBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "yjernite/retribert-base-uncased",
+ # See all RetriBert models at https://huggingface.co/models?filter=retribert
+]
+
+
+# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
+class RetriBertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = RetriBertConfig
+ load_tf_weights = None
+ base_model_prefix = "retribert"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+RETRIBERT_START_DOCSTRING = r"""
+
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+ Parameters:
+ config ([`RetriBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ """Bert Based model to embed queries or document for document retrieval.""",
+ RETRIBERT_START_DOCSTRING,
+)
+class RetriBertModel(RetriBertPreTrainedModel):
+ def __init__(self, config: RetriBertConfig) -> None:
+ super().__init__(config)
+ self.projection_dim = config.projection_dim
+
+ self.bert_query = BertModel(config)
+ self.bert_doc = None if config.share_encoders else BertModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
+ self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
+
+ self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def embed_sentences_checkpointed(
+ self,
+ input_ids,
+ attention_mask,
+ sent_encoder,
+ checkpoint_batch_size=-1,
+ ):
+ # reproduces BERT forward pass with checkpointing
+ if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
+ return sent_encoder(input_ids, attention_mask=attention_mask)[1]
+ else:
+ # prepare implicit variables
+ device = input_ids.device
+ input_shape = input_ids.size()
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+ head_mask = [None] * sent_encoder.config.num_hidden_layers
+ extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(
+ attention_mask, input_shape
+ )
+
+ # define function for checkpointing
+ def partial_encode(*inputs):
+ encoder_outputs = sent_encoder.encoder(
+ inputs[0],
+ attention_mask=inputs[1],
+ head_mask=head_mask,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = sent_encoder.pooler(sequence_output)
+ return pooled_output
+
+ # run embedding layer on everything at once
+ embedding_output = sent_encoder.embeddings(
+ input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
+ )
+ # run encoding and pooling on one mini-batch at a time
+ pooled_output_list = []
+ for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
+ b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
+ b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
+ pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
+ pooled_output_list.append(pooled_output)
+ return torch.cat(pooled_output_list, dim=0)
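+
+    # Editor's note (not part of the upstream file): `checkpoint_batch_size` trades
+    # compute for memory. For example,
+    #
+    #     reps = model.embed_sentences_checkpointed(
+    #         input_ids, attention_mask, model.bert_query, checkpoint_batch_size=8
+    #     )
+    #
+    # encodes and pools 8 sequences at a time under `torch.utils.checkpoint`, keeping
+    # only one mini-batch of activations alive and recomputing the rest on backward.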
+
+ def embed_questions(
+ self,
+ input_ids,
+ attention_mask=None,
+ checkpoint_batch_size=-1,
+ ):
+ q_reps = self.embed_sentences_checkpointed(
+ input_ids,
+ attention_mask,
+ self.bert_query,
+ checkpoint_batch_size,
+ )
+ return self.project_query(q_reps)
+
+ def embed_answers(
+ self,
+ input_ids,
+ attention_mask=None,
+ checkpoint_batch_size=-1,
+ ):
+ a_reps = self.embed_sentences_checkpointed(
+ input_ids,
+ attention_mask,
+ self.bert_query if self.bert_doc is None else self.bert_doc,
+ checkpoint_batch_size,
+ )
+ return self.project_doc(a_reps)
+
+ def forward(
+ self,
+ input_ids_query: torch.LongTensor,
+ attention_mask_query: Optional[torch.FloatTensor],
+ input_ids_doc: torch.LongTensor,
+ attention_mask_doc: Optional[torch.FloatTensor],
+ checkpoint_batch_size: int = -1,
+ ) -> torch.FloatTensor:
+ r"""
+ Args:
+ input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary for the queries in a batch.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary for the documents in a batch.
+ attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on document padding token indices.
+ checkpoint_batch_size (`int`, *optional*, defaults to `-1`):
+ If greater than 0, uses gradient checkpointing to only compute sequence representation on
+ `checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to
+ all document representations in the batch.
+
+ Return:
+            `torch.FloatTensor`: The bidirectional cross-entropy loss obtained while trying to match each query to its
+ corresponding document and each document to its corresponding query in the batch
+ """
+ device = input_ids_query.device
+ q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
+ a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
+ compare_scores = torch.mm(q_reps, a_reps.t())
+ loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
+ loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
+ loss = (loss_qa + loss_aq) / 2
+ return loss
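+
+    # Editor's illustration (not part of the upstream file): `compare_scores` is a
+    # (num_queries x num_docs) similarity matrix in which row i should peak at column
+    # i. The two cross-entropy terms score query->document and document->query
+    # matching against the diagonal targets:
+    #
+    #     scores = torch.tensor([[9.0, 1.0], [0.5, 8.0]])
+    #     targets = torch.arange(2)          # each query matches its own document
+    #     loss_qa = nn.CrossEntropyLoss()(scores, targets)
+    #     loss_aq = nn.CrossEntropyLoss()(scores.t(), targets)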
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0904e3c931e40264cef08c252834976cb92255a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py
@@ -0,0 +1,537 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for RetriBERT."""
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "yjernite/retribert-base-uncased": (
+ "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
+ ),
+ }
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "yjernite/retribert-base-uncased": 512,
+}
+
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "yjernite/retribert-base-uncased": {"do_lower_case": True},
+}
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+class RetriBertTokenizer(PreTrainedTokenizer):
+ r"""
+ Constructs a RetriBERT tokenizer.
+
+ [`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting
+ and wordpiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
+ to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
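+
+ Example (an illustrative sketch, not from the original file; assumes the checkpoint referenced above is
+ available, and the exact wordpiece split depends on its vocabulary):
+
+ ```python
+ >>> from transformers import RetriBertTokenizer
+
+ >>> tokenizer = RetriBertTokenizer.from_pretrained("yjernite/retribert-base-uncased")
+ >>> inputs = tokenizer("How many cats are there?", return_tensors="pt")
+ >>> sorted(inputs.keys())  # doctest: +SKIP
+ ['attention_mask', 'input_ids']
+ ```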
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ model_input_names = ["input_ids", "attention_mask"]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.__init__
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
+ def vocab_size(self):
+ return len(self.vocab)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
+ def _tokenize(self, text, split_special_tokens=False):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
+ ):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
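+
+ # Illustrative sketch, not part of the original file: for hypothetical
+ # token_ids_0 = [5, 6] and token_ids_1 = [7], the three helpers above line up as
+ #   build_inputs_with_special_tokens      -> [CLS, 5, 6, SEP, 7, SEP]
+ #   get_special_tokens_mask               -> [1, 0, 0, 1, 0, 1]
+ #   create_token_type_ids_from_sequences  -> [0, 0, 0, 0, 1, 1]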
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*)
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
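+
+ # Illustrative sketch, not part of the original file: ord("中") == 0x4E2D falls in
+ # the 0x4E00-0x9FFF block above, so _tokenize_chinese_chars("ab中cd") returns
+ # "ab 中 cd", isolating the CJK character for downstream whitespace splitting.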
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
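+
+# Illustrative sketch, not part of the original file, of the greedy
+# longest-match-first loop above with a toy vocabulary:
+#
+# >>> vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
+# >>> WordpieceTokenizer(vocab=vocab, unk_token="[UNK]").tokenize("unaffable")
+# ['un', '##aff', '##able']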
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..07f7964b9f3f8e1da0f6b54494e28ba09df192a1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py
@@ -0,0 +1,205 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for RetriBERT."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ....tokenization_utils_fast import PreTrainedTokenizerFast
+from ....utils import logging
+from .tokenization_retribert import RetriBertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "yjernite/retribert-base-uncased": (
+ "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
+ ),
+ },
+ "tokenizer_file": {
+ "yjernite/retribert-base-uncased": (
+ "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
+ ),
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "yjernite/retribert-base-uncased": 512,
+}
+
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "yjernite/retribert-base-uncased": {"do_lower_case": True},
+}
+
+
+class RetriBertTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library).
+
+ [`RetriBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation
+ splitting and wordpiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
+ whitespaces by the classic one.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ slow_tokenizer_class = RetriBertTokenizer
+ model_input_names = ["input_ids", "attention_mask"]
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.__init__
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..82bbacd15b0d00509972e16ac406005ee97370f7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ....utils import _LazyModule
+
+
+_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
+
+
+if TYPE_CHECKING:
+ from .tokenization_tapex import TapexTokenizer
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1655e28a6eeba24b9aad46a4d1babab1dc7c6c9d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..283fc1290f8190fa35dd3309a48312a6299231e8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5ee093c56bd2680ca480713674a40bdc68483a6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py
@@ -0,0 +1,1487 @@
+# coding=utf-8
+# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for TAPEX."""
+
+import json
+import os
+import random
+from functools import lru_cache
+from typing import Dict, List, Optional, Tuple, Union
+
+import regex as re
+
+from ....file_utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available
+from ....tokenization_utils import AddedToken, PreTrainedTokenizer
+from ....tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, BatchEncoding, TextInput, TruncationStrategy
+from ....utils import logging
+
+
+if is_pandas_available():
+ import pandas as pd
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "microsoft/tapex-base": "https://huggingface.co/microsoft/tapex-base/resolve/main/vocab.json",
+ },
+ "merges_file": {
+ "microsoft/tapex-base": "https://huggingface.co/microsoft/tapex-base/resolve/main/merges.txt",
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "microsoft/tapex-base": 512,
+}
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "microsoft/tapex-base": {"do_lower_case": True},
+}
+
+
+class TapexTruncationStrategy(ExplicitEnum):
+ """
+ Possible values for the `truncation` argument in [`~TapexTokenizer.__call__`]. Useful for tab-completion in an IDE.
+ """
+
+ DROP_ROWS_TO_FIT = "drop_rows_to_fit"
+
+
+TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to encode the sequences with the special tokens relative to their model.
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, `str`, [`TapexTruncationStrategy`] or [`~tokenization_utils_base.TruncationStrategy`],
+ *optional*, defaults to `False`):
+
+ Activates and controls truncation. Accepts the following values:
+
+ - `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
+ row by row, removing rows from the table.
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+ to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
+ sequences (or a batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ max_length (`int`, *optional*):
+ Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
+ `None`, this will use the predefined model maximum length if a maximum length is required by one of the
+ truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
+ truncation/padding to a maximum length will be deactivated.
+ stride (`int`, *optional*, defaults to 0):
+ If set to a number along with `max_length`, the overflowing tokens returned when
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
+ argument defines the number of overlapping tokens.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+"""
+
+
+@lru_cache()
+def bytes_to_unicode():
+ """
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to
+ whitespace/control characters that the bpe code barfs on. The reversible bpe codes work on unicode strings. This
+ means you need a large number of unicode characters in your vocab if you want to avoid UNKs. When you're at
+ something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant
+ percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and
+ unicode strings.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
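+
+# Illustrative sketch, not part of the original file: printable bytes map to
+# themselves, while unprintable ones are shifted above 0xFF, e.g. the space
+# byte (32) becomes "Ġ" (U+0120 == 256 + 32):
+#
+# >>> b2u = bytes_to_unicode()
+# >>> b2u[ord("A")], b2u[32]
+# ('A', 'Ġ')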
+
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
+ strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
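+
+# Illustrative sketch, not part of the original file (the result is a set, hence
+# the sorting for a stable display):
+#
+# >>> sorted(get_pairs(("h", "e", "l", "l", "o")))
+# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]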
+
+
+class IndexedRowTableLinearize:
+ """
+ FORMAT: col : col1 | col2 | col3 row 1 : val1 | val2 | val3 row 2 : ...
+ """
+
+ # Used by the assert in process_table below; upstream defines this message on a
+ # TableLinearize base class.
+ PROMPT_MESSAGE = (
+ "Please check that your table follows this format: "
+ '{"header": ["col1", "col2", "col3"], "rows": [["row11", "row12", "row13"], ["row21", "row22", "row23"]]}'
+ )
+
+ def process_table(self, table_content: Dict):
+ """
+ Given a table, TableLinearize aims at converting it into a flatten sequence with special symbols.
+ """
+ assert "header" in table_content and "rows" in table_content, self.PROMPT_MESSAGE
+ # process header
+ table_str = self.process_header(table_content["header"]) + " "
+ # process rows
+ for i, row_example in enumerate(table_content["rows"]):
+ # NOTE: the row should start from row 1 instead of 0
+ table_str += self.process_row(row_example, row_index=i + 1) + " "
+ return table_str.strip()
+
+ def process_header(self, headers: List):
+ """
+ Given a list of headers, TableLinearize aims at converting it into a flatten sequence with special symbols.
+ """
+ return "col : " + " | ".join(headers)
+
+ def process_row(self, row: List, row_index: int):
+ """
+ Given a row, TableLinearize aims at converting it into a flatten sequence with special symbols.
+ """
+ row_str = ""
+ row_cell_values = []
+ for cell_value in row:
+ if isinstance(cell_value, int):
+ row_cell_values.append(str(cell_value))
+ else:
+ row_cell_values.append(cell_value)
+ row_str += " | ".join(row_cell_values)
+ return "row " + str(row_index) + " : " + row_str
+
+
+class TapexTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a TAPEX tokenizer. Based on byte-level Byte-Pair-Encoding (BPE).
+
+ This tokenizer can be used to flatten one or more table(s) and concatenate them with one or more related sentences
+ to be used by TAPEX models. The format that the TAPEX tokenizer creates is the following:
+
+ sentence col : col1 | col2 | col3 row 1 : val1 | val2 | val3 row 2 : ...
+
+ The tokenizer supports a single table + single query, a single table and multiple queries (in which case the table
+ will be duplicated for every query), a single query and multiple tables (in which case the query will be duplicated
+ for every table), and multiple tables and queries. In other words, you can provide a batch of tables + questions to
+ the tokenizer, for instance, to prepare them for the model.
+
+ Tokenization itself is based on the BPE algorithm. It is identical to the one used by BART, RoBERTa and GPT-2.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows treating the leading word just as any
+ other word. (The BART tokenizer detects the beginning of words by the preceding space.)
+ max_cell_length (`int`, *optional*, defaults to 15):
+ Maximum number of characters per cell when linearizing a table. If this number is exceeded, truncation
+ takes place.
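+
+ Example (an illustrative sketch, not from the original file; assumes the microsoft/tapex-base checkpoint
+ referenced above and that pandas is installed):
+
+ ```python
+ >>> import pandas as pd
+ >>> from transformers import TapexTokenizer
+
+ >>> tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
+ >>> table = pd.DataFrame.from_dict({"year": [2008, 2012], "city": ["beijing", "london"]})
+ >>> encoding = tokenizer(table=table, query="where were the games held in 2012?")
+ >>> # the flattened source text looks like:
+ >>> # "where were the games held in 2012? col : year | city row 1 : 2008 | beijing row 2 : 2012 | london"
+ ```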
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ do_lower_case=True,
+ errors="replace",
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ add_prefix_space=False,
+ max_cell_length=15,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+
+ # Mask token behaves like a normal word, i.e. includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+ self.do_lower_case = do_lower_case
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+ # additional properties
+
+ super().__init__(
+ vocab_file=vocab_file,
+ merges_file=merges_file,
+ do_lower_case=do_lower_case,
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ max_cell_length=max_cell_length,
+ **kwargs,
+ )
+
+ self.max_cell_length = max_cell_length
+ self.table_linearize = IndexedRowTableLinearize()
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A TAPEX sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Args:
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Args:
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. TAPEX does not:
+ make use of token type ids, therefore a list of zeros is returned.
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
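+
+ # Illustrative sketch, not part of the original file: for hypothetical
+ # token_ids_0 = [8, 9] and token_ids_1 = [10], the helpers above give
+ #   build_inputs_with_special_tokens      -> [<s>, 8, 9, </s>, </s>, 10, </s>]
+ #   get_special_tokens_mask               -> [1, 0, 0, 1, 1, 0, 1]
+ #   create_token_type_ids_from_sequences  -> [0, 0, 0, 0, 0, 0, 0]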
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
+ text = " " + text
+ return (text, kwargs)
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
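+
+ # Illustrative sketch, not part of the original file: with toy merges ranked
+ # {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2, ("hell", "o"): 3}, bpe("hello")
+ # repeatedly applies the best-ranked adjacent pair:
+ #   (h, e, l, l, o) -> (he, l, l, o) -> (he, ll, o) -> (hell, o) -> (hello,)
+ # and returns "hello"; once no remaining pair has a rank, the pieces are
+ # space-joined and cached.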
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+ return bpe_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
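+
+ # Illustrative sketch, not part of the original file: "Ġ" is the byte-level
+ # encoding of a space, so for a tokenizer instance `tok`:
+ #
+ # >>> tok.convert_tokens_to_string(["hello", "Ġworld"])  # doctest: +SKIP
+ # 'hello world'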
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]] = None,
+ query: Optional[Union[TextInput, List[TextInput]]] = None,
+ answer: Union[str, List[str]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several table-sequence pair(s).
+
+ Args:
+ table (`pd.DataFrame`, `List[pd.DataFrame]`):
+ Table(s) containing tabular data.
+ query (`str` or `List[str]`, *optional*):
+ Sentence or batch of sentences related to one or more table(s) to be encoded. Note that the number of
+ sentences must match the number of tables.
+ answer (`str` or `List[str]`, *optional*):
+ Optionally, the corresponding answer to the questions as supervision.
+ """
+
+ if table is not None:
+ return self.source_call_func(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ elif answer is not None:
+ return self.target_call_func(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ raise ValueError("You need to provide either a `table` or an `answer`.")
+
+ def source_call_func(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
+ query: Optional[Union[TextInput, List[TextInput]]] = None,
+ answer: Union[str, List[str]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ # Input type checking for clearer error
+ valid_table = False
+ valid_query = False
+
+ # Check that table have a valid type
+ if isinstance(table, pd.DataFrame):
+ valid_table = True
+ elif isinstance(table, (list, tuple)) and isinstance(table[0], pd.DataFrame):
+ valid_table = True
+
+ # Check that query have a valid type
+ if query is None or isinstance(query, str):
+ valid_query = True
+ elif isinstance(query, (list, tuple)):
+ if len(query) == 0 or isinstance(query[0], str):
+ valid_query = True
+
+ if not valid_table:
+ raise ValueError(
+ "table input must be of type `pd.DataFrame` (single example) or `List[pd.DataFrame]` (batch of examples)."
+ )
+ if not valid_query:
+ raise ValueError("query input must be of type `str` (single example) or `List[str]` (batch of examples).")
+ is_batched = isinstance(table, (list, tuple)) or isinstance(query, (list, tuple))
+
+ if is_batched:
+ return self.batch_encode_plus(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def batch_encode_plus(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
+ query: Optional[List[TextInput]] = None,
+ answer: List[str] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str] = None,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+
+
+ This method is deprecated, `__call__` should be used instead.
+
+
+ """
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._batch_encode_plus(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _batch_encode_plus(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
+ query: Optional[List[TextInput]] = None,
+ answer: Optional[List[str]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast."
+ )
+
+ if isinstance(table, pd.DataFrame) and isinstance(query, (list, tuple)):
+ # single table, many queries case
+ # duplicate table for every query
+ table = [table] * len(query)
+ if isinstance(table, (list, tuple)) and isinstance(query, str):
+ # many tables, single query case
+ # duplicate query for every table
+ query = [query] * len(table)
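+        # e.g. one DataFrame with queries ["q1", "q2"] is broadcast to [df, df], and a
+        # single query with several tables is broadcast likewise, so pairs align below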
+
+ batch_outputs = self._batch_prepare_for_model(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=return_tensors,
+ verbose=verbose,
+ )
+
+ return BatchEncoding(batch_outputs)
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def _batch_prepare_for_model(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
+ query: Optional[Union[TextInput, List[TextInput]]] = None,
+ answer: Optional[Union[str, List[str]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ ) -> BatchEncoding:
+ """
+ This method adds special tokens, truncates sequences if overflowing while taking into account the special
+ tokens and manages a moving window (with user defined stride) for overflowing tokens.
+ """
+ batch_outputs = {}
+        if answer is None:
+            answer = [None] * len(table)
+        if query is None:
+            query = [None] * len(table)
+ for _table, _query, _answer in zip(table, query, answer):
+ text = self.prepare_table_query(
+ _table, _query, _answer, truncation_strategy=truncation_strategy, max_length=max_length
+ )
+
+ if self.do_lower_case:
+ text = text.lower()
+
+ tokens = self.tokenize(text)
+ outputs = self.prepare_for_model(
+ ids=self.convert_tokens_to_ids(tokens),
+ add_special_tokens=add_special_tokens,
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=None, # we pad in batch afterwards
+ return_attention_mask=False, # we pad in batch afterwards
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=None, # We convert the whole batch to tensors at the end
+ prepend_batch_axis=False,
+ verbose=verbose,
+ )
+
+ for key, value in outputs.items():
+ if key not in batch_outputs:
+ batch_outputs[key] = []
+ batch_outputs[key].append(value)
+
+ batch_outputs = self.pad(
+ batch_outputs,
+ padding=padding_strategy.value,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+
+ return batch_outputs
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
+ def encode(
+ self,
+ table: "pd.DataFrame",
+ query: Optional[TextInput] = None,
+ answer: Optional[str] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Optional[Union[bool, str, TruncationStrategy, TapexTruncationStrategy]] = None,
+ max_length: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> List[int]:
+ """
+ Prepare a table, a string and possible answer for the model. This method does not return token type IDs,
+ attention masks, etc. which are necessary for the model to work correctly. Use this method if you want to build
+ your processing on your own, otherwise refer to `__call__`.
+ """
+ encoded_inputs = self.encode_plus(
+ table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+
+ return encoded_inputs["input_ids"]
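+
+    # Hedged example: `encode` returns only the token ids, e.g.
+    #     ids = tokenizer.encode(table, query="how many rows are there?")
+    # use `__call__` / `encode_plus` when attention masks etc. are also needed.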
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def encode_plus(
+ self,
+ table: "pd.DataFrame",
+ query: Optional[TextInput] = None,
+ answer: Optional[str] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Optional[Union[bool, str]] = None,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._encode_plus(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _encode_plus(
+ self,
+ table: "pd.DataFrame",
+ query: Optional[TextInput] = None,
+ answer: Optional[str] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast. "
+ "More information on available tokenizers at "
+ "https://github.com/huggingface/transformers/pull/2674"
+ )
+
+ text = self.prepare_table_query(
+ table, query, answer, truncation_strategy=truncation_strategy, max_length=max_length
+ )
+
+ # if necessary, perform lower case
+ if self.do_lower_case:
+ text = text.lower()
+
+ tokens = self.tokenize(text)
+
+ return self.prepare_for_model(
+ ids=self.convert_tokens_to_ids(tokens),
+ add_special_tokens=add_special_tokens,
+ padding=padding_strategy.value,
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ prepend_batch_axis=True,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ def target_call_func(
+ self,
+ answer: Union[str, List[str]],
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ The method tokenizes and prepares the answer label for the model.
+
+ Args:
+ answer (`str` or `List[str]`):
+ Corresponding answer supervision to the queries for training the model.
+ """
+ is_batched = isinstance(answer, (list, tuple))
+
+ if is_batched:
+ return self.target_batch_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.target_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def target_batch_encode_plus(
+ self,
+ answer: List[str],
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Optional[Union[bool, str]] = None,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepare answer strings for the model.
+
+ Args:
+            answer (`List[str]`):
+                Corresponding answer supervision to the queries for training the model.
+ """
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._target_batch_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _target_batch_encode_plus(
+ self,
+ answer: List[str],
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ batch_outputs = {}
+ for text in answer:
+ if self.do_lower_case:
+ text = text.lower()
+
+ tokens = self.tokenize(text)
+ outputs = self.prepare_for_model(
+ ids=self.convert_tokens_to_ids(tokens),
+ add_special_tokens=add_special_tokens,
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=None, # we pad in batch afterwards
+ return_attention_mask=False, # we pad in batch afterwards
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=None, # We convert the whole batch to tensors at the end
+ prepend_batch_axis=False,
+ verbose=verbose,
+ )
+
+ for key, value in outputs.items():
+ if key not in batch_outputs:
+ batch_outputs[key] = []
+ batch_outputs[key].append(value)
+
+ batch_outputs = self.pad(
+ batch_outputs,
+ padding=padding_strategy.value,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+        # a single construction is enough; wrapping a BatchEncoding in BatchEncoding again is redundant
+        return BatchEncoding(batch_outputs, tensor_type=return_tensors)
+
+ def target_encode(
+ self,
+ answer: str,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Optional[Union[bool, str, TruncationStrategy, TapexTruncationStrategy]] = None,
+ max_length: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> List[int]:
+ """
+ Prepare the answer string for the model. This method does not return token type IDs, attention masks, etc.
+ which are necessary for the model to work correctly. Use this method if you want to build your processing on
+ your own, otherwise refer to `__call__`.
+
+ Args:
+            answer (`str`):
+                Corresponding answer supervision to the queries for training the model.
+ """
+ encoded_outputs = self.target_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+
+ return encoded_outputs["input_ids"]
+
+ def target_encode_plus(
+ self,
+ answer: str,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+        truncation: Optional[Union[bool, str]] = None,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+        Prepare an answer string for the model.
+
+        Args:
+            answer (`str`):
+                Corresponding answer supervision to the queries for training the model.
+ """
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._target_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _target_encode_plus(
+ self,
+ answer: str,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast. "
+ "More information on available tokenizers at "
+ "https://github.com/huggingface/transformers/pull/2674"
+ )
+
+ text = answer
+
+ # if necessary, perform lower case
+ if self.do_lower_case:
+ text = text.lower()
+
+ tokens = self.tokenize(text)
+
+ return self.prepare_for_model(
+ ids=self.convert_tokens_to_ids(tokens),
+ add_special_tokens=add_special_tokens,
+ padding=padding_strategy.value,
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ prepend_batch_axis=True,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ def prepare_table_query(
+ self,
+ table,
+ query,
+ answer=None,
+        truncation_strategy: Union[str, TruncationStrategy, TapexTruncationStrategy] = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length=None,
+ ):
+ """
+ This method can be used to linearize a table and add a corresponding query.
+
+ Optionally, it also handles truncation of the table (cells).
+
+ An answer can be provided for more precise truncation.
+ """
+ if not table.empty:
+ # step 1: create table dictionary
+ table_content = {"header": list(table.columns), "rows": [list(row.values) for i, row in table.iterrows()]}
+
+ # step 2: modify table internally
+ # always truncate table cells based on self.max_cell_length
+ # optionally truncate rows if truncation_strategy is set to it
+ self.truncate_table_cells(table_content, query, answer)
+ if truncation_strategy == TapexTruncationStrategy.DROP_ROWS_TO_FIT:
+ self.truncate_table_rows(table_content, query, answer, max_length=max_length)
+
+ # step 3: linearize table
+ linear_table = self.table_linearize.process_table(table_content)
+ else:
+ linear_table = ""
+
+ if linear_table == "":
+ logger.warning(
+ "You provide an empty table, or all cells contain much tokens (e.g., >= 1024 tokens). "
+ + f"Please carefully check the corresponding table with the query : {query}."
+ )
+ if query == "":
+ logger.warning("You provide nothing to query with respect to the table.")
+ # step 4: concatenate query with linear_table
+ separator = " " if query and linear_table else ""
+ joint_input = (query + separator + linear_table) if query else linear_table
+
+ return joint_input
+
+    def truncate_table_cells(self, table_content: Dict, question: str, answer: Optional[List[str]]):
+ # TODO (Qian): is it possible to revert the original cell if it is in the final answer?
+ cell_mapping = {}
+ for row in table_content["rows"]:
+ for i, cell in enumerate(row):
+ truncate_cell = self.truncate_cell(cell)
+ if truncate_cell is not None:
+ cell_mapping[cell] = truncate_cell
+ row[i] = truncate_cell
+
+ # modify the answer list
+ if answer is not None:
+ for i, case in enumerate(answer):
+ if case in cell_mapping.keys():
+ answer[i] = cell_mapping[case]
+
+ def truncate_cell(self, cell_value):
+        # leave numeric cells untouched
+        if isinstance(cell_value, (int, float)):
+ return cell_value
+ if cell_value.strip() != "":
+ try_tokens = self.tokenize(cell_value)
+ if len(try_tokens) >= self.max_cell_length:
+ retain_tokens = try_tokens[: self.max_cell_length]
+ retain_cell_value = self.convert_tokens_to_string(retain_tokens)
+ return retain_cell_value
+ else:
+ return None
+ else:
+ return cell_value
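+
+    # Hedged note on truncate_cell: numeric cells pass through untouched; a text cell
+    # whose tokenization reaches max_cell_length is clipped to its leading tokens and
+    # re-joined via convert_tokens_to_string; returning None tells the caller to keep
+    # the original cell value.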
+
+ def truncate_table_rows(
+ self, table_content: Dict, question: str, answer: Optional[Union[str, List[str]]] = None, max_length=None
+ ):
+ """
+ Args:
+ table_content:
+ {"header": xxx, "rows": xxx, "id" (Optionally): xxx}
+
+ question:
+ natural language sentence
+
+ answer:
+ if for training, is the supervision; otherwise will be empty
+ """
+ delete_ratio, remain_token_len = self.estimate_delete_ratio(table_content, question, max_length)
+ # randomly delete unrelated rows
+ self.delete_unrelated_rows(table_content, question, answer, delete_ratio)
+ # guarantee the result < max_length
+ maximum_keep_rows = 0
+ for ind, row_example in enumerate(table_content["rows"]):
+ value_string = self.table_linearize.process_row(row_example, ind + 1)
+ value_token_len = len(self.tokenize(value_string))
+            # stop once adding this row would exceed the remaining token budget
+ if value_token_len > remain_token_len:
+ break
+ remain_token_len -= value_token_len
+ maximum_keep_rows += 1
+ del table_content["rows"][maximum_keep_rows:]
+
+ def estimate_delete_ratio(self, table_content: Dict, question: str, max_length=None):
+ if "header" not in table_content or "rows" not in table_content:
+ raise ValueError("The table content should contain both 'header' and 'rows' keys.")
+        # tokenize the question; special tokens are only prepended to the question
+ question_tokens = self.tokenize(question, add_special_tokens=True)
+ # calculate the tokens of header
+ header_string = self.table_linearize.process_header(table_content["header"])
+ header_tokens = self.tokenize(header_string, add_special_tokens=False)
+        used_token_len = len(question_tokens) + len(header_tokens)
+        # remaining token budget for the rows
+        remain_token_len = max_length - used_token_len
+
+        # split all cell values into tokens and see how many can be accommodated
+        value_string = ""
+ for _, row_example in enumerate(table_content["rows"]):
+ # use a general index to roughly estimate the overall token len
+ value_string += self.table_linearize.process_row(row_example, 100) + " "
+ value_token_len = len(self.tokenize(value_string))
+
+ if value_token_len < remain_token_len:
+ # no row will be deleted
+ return 0.0, remain_token_len
+ else:
+            # compute a rough delete ratio
+ return 1.0 - remain_token_len / value_token_len, remain_token_len
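+
+    # Hedged worked example: with max_length = 1024 and 124 tokens used by the question
+    # and header, remain_token_len = 900; if all rows together tokenize to 1800 tokens,
+    # estimate_delete_ratio returns (1 - 900 / 1800, 900) = (0.5, 900).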
+
+    def delete_unrelated_rows(self, table_content: Dict, question: str, answer: Optional[List[str]], delete_ratio: float):
+ """
+ The argument answer is used only during training.
+ """
+ truncated_unrelated_indices = []
+ related_indices = []
+ if answer is None or len(answer) == 0:
+ answer_set = set()
+ else:
+ answer_set = {ans_ex.lower() for ans_ex in answer}
+ # add question key words into answer set
+ if question is not None:
+ answer_set.update(question.split())
+ question_set = set(question.strip("?!.,").split(" "))
+ row_max_len = len(table_content["rows"])
+ for _row_idx, row in enumerate(table_content["rows"]):
+ lower_row = {str(cell).lower() for cell in row}
+ if len(lower_row & answer_set) == 0 and len(lower_row & question_set) == 0:
+ truncated_unrelated_indices.append(_row_idx)
+ else:
+ # add neighbours to preserve information aggressively
+ related_indices.extend([_row_idx - 2, _row_idx - 1, _row_idx, _row_idx + 1, _row_idx + 2])
+
+ # remove the neighbours
+ truncated_unrelated_indices = [
+ _row_idx for _row_idx in truncated_unrelated_indices if _row_idx not in related_indices
+ ]
+        # select some rows to drop; sample without replacement so each index is dropped at most once
+        drop_items = min(len(truncated_unrelated_indices), int(len(table_content["rows"]) * delete_ratio))
+        drop_row_indices = random.sample(truncated_unrelated_indices, k=drop_items)
+
+ for _row_idx in reversed(range(row_max_len)):
+ if _row_idx in drop_row_indices:
+ del table_content["rows"][_row_idx]
+
+        # log a warning whenever rows have actually been dropped
+        if "id" in table_content and len(drop_row_indices) > 0:
+            logger.warning("Deleted {} rows from table {}".format(len(drop_row_indices), table_content["id"]))
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7af1bb48cb7d6a495611b0dadfc910779262813
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_trajectory_transformer": [
+ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "TrajectoryTransformerConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_trajectory_transformer"] = [
+ "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TrajectoryTransformerModel",
+ "TrajectoryTransformerPreTrainedModel",
+ "load_tf_weights_in_trajectory_transformer",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_trajectory_transformer import (
+ TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ TrajectoryTransformerConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_trajectory_transformer import (
+ TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TrajectoryTransformerModel,
+ TrajectoryTransformerPreTrainedModel,
+ load_tf_weights_in_trajectory_transformer,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31e428ab9bc5f0e961154245cf15b9994ea7d8b9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..250699aa495ab3c4690e2c6c9a822c8369fb2762
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9c5eb2ab090a0bdbafb637f329e602121706bc4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fefbf72d367e37c1db30e32dbd6fc055f94ee051
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfad075c6ae848fd56001615747103baedb4c591
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py
@@ -0,0 +1,159 @@
+# coding=utf-8
+# Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TrajectoryTransformer model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
+ "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
+ ),
+ # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
+}
+
+
+class TrajectoryTransformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`TrajectoryTransformerModel`]. It is used to
+    instantiate a TrajectoryTransformer model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
+ TrajectoryTransformer
+ [CarlCochet/trajectory-transformer-halfcheetah-medium-v2](https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 100):
+ Vocabulary size of the TrajectoryTransformer model. Defines the number of different tokens that can be
+ represented by the `trajectories` passed when calling [`TrajectoryTransformerModel`]
+ action_weight (`int`, *optional*, defaults to 5):
+ Weight of the action in the loss function
+ reward_weight (`int`, *optional*, defaults to 1):
+ Weight of the reward in the loss function
+ value_weight (`int`, *optional*, defaults to 1):
+ Weight of the value in the loss function
+ block_size (`int`, *optional*, defaults to 249):
+ Size of the blocks in the trajectory transformer.
+ action_dim (`int`, *optional*, defaults to 6):
+ Dimension of the action space.
+ observation_dim (`int`, *optional*, defaults to 17):
+ Dimension of the observation space.
+ transition_dim (`int`, *optional*, defaults to 25):
+ Dimension of the transition space.
+ n_layer (`int`, *optional*, defaults to 4):
+ Number of hidden layers in the Transformer encoder.
+ n_head (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ n_embd (`int`, *optional*, defaults to 128):
+ Dimensionality of the embeddings and hidden states.
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
+ The dropout ratio for the embeddings.
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+        kaiming_initializer_range (`float`, *optional*, defaults to 1):
+ A coefficient scaling the negative slope of the kaiming initializer rectifier for EinLinear layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ Example:
+
+ ```python
+ >>> from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel
+
+ >>> # Initializing a TrajectoryTransformer CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration
+ >>> configuration = TrajectoryTransformerConfig()
+
+ >>> # Initializing a model (with random weights) from the CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration
+ >>> model = TrajectoryTransformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "trajectory_transformer"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "hidden_size": "n_embd",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=100,
+ action_weight=5,
+ reward_weight=1,
+ value_weight=1,
+ block_size=249,
+ action_dim=6,
+ observation_dim=17,
+ transition_dim=25,
+ n_layer=4,
+ n_head=4,
+ n_embd=128,
+ embd_pdrop=0.1,
+ attn_pdrop=0.1,
+ resid_pdrop=0.1,
+ learning_rate=0.0006,
+ max_position_embeddings=512,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ kaiming_initializer_range=1,
+ use_cache=True,
+ pad_token_id=1,
+ bos_token_id=50256,
+ eos_token_id=50256,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.action_weight = action_weight
+ self.reward_weight = reward_weight
+ self.value_weight = value_weight
+ self.max_position_embeddings = max_position_embeddings
+ self.block_size = block_size
+ self.action_dim = action_dim
+ self.observation_dim = observation_dim
+ self.transition_dim = transition_dim
+ self.learning_rate = learning_rate
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.n_embd = n_embd
+ self.embd_pdrop = embd_pdrop
+ self.attn_pdrop = attn_pdrop
+ self.resid_pdrop = resid_pdrop
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.kaiming_initializer_range = kaiming_initializer_range
+ self.use_cache = use_cache
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..622552fa78360826fc976d6f1d8c97fcc74a8a38
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,70 @@
+# coding=utf-8
+# Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TrajectoryTransformer pytorch checkpoint conversion"""
+
+import torch
+import trajectory.utils as utils
+
+from transformers import TrajectoryTransformerModel
+
+
+class Parser(utils.Parser):
+ dataset: str = "halfcheetah-medium-expert-v2"
+ config: str = "config.offline"
+
+
+def convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(logbase, dataset, loadpath, epoch, device):
+ """Converting Sequential blocks to ModuleList"""
+
+ gpt, gpt_epoch = utils.load_model(logbase, dataset, loadpath, epoch=epoch, device=device)
+ trajectory_transformer = TrajectoryTransformerModel(gpt.config)
+
+ trajectory_transformer.tok_emb.load_state_dict(gpt.tok_emb.state_dict())
+ trajectory_transformer.pos_emb = gpt.pos_emb
+ trajectory_transformer.drop.load_state_dict(gpt.drop.state_dict())
+ trajectory_transformer.ln_f.load_state_dict(gpt.ln_f.state_dict())
+ trajectory_transformer.head.load_state_dict(gpt.head.state_dict())
+
+ for i, block in enumerate(gpt.blocks):
+ trajectory_transformer.blocks[i].ln1.load_state_dict(gpt.blocks[i].ln1.state_dict())
+ trajectory_transformer.blocks[i].ln2.load_state_dict(gpt.blocks[i].ln2.state_dict())
+ trajectory_transformer.blocks[i].attn.load_state_dict(gpt.blocks[i].attn.state_dict())
+
+ trajectory_transformer.blocks[i].l1.load_state_dict(gpt.blocks[i].mlp[0].state_dict())
+ trajectory_transformer.blocks[i].act.load_state_dict(gpt.blocks[i].mlp[1].state_dict())
+ trajectory_transformer.blocks[i].l2.load_state_dict(gpt.blocks[i].mlp[2].state_dict())
+ trajectory_transformer.blocks[i].drop.load_state_dict(gpt.blocks[i].mlp[3].state_dict())
+
+ torch.save(trajectory_transformer.state_dict(), "pytorch_model.bin")
+
+
+if __name__ == "__main__":
+ """
+    To run this script you will need to install the original repository, which is required to load the original
+    model. You can find it here: https://github.com/jannerm/trajectory-transformer. From that repository you can
+    also download the original pytorch checkpoints.
+
+ Run with the command:
+
+ ```sh
+    python convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py --dataset <dataset> \
+        --gpt_loadpath <gpt_loadpath>
+ ```
+ """
+
+ args = Parser().parse_args("plan")
+ convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(
+ args.logbase, args.dataset, args.gpt_loadpath, args.gpt_epoch, args.device
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..40c08e4d1d441ae6f30414eacea4423eae5de378
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
@@ -0,0 +1,608 @@
+# coding=utf-8
+# Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch TrajectoryTransformer model."""
+
+import math
+import os
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import functional as F
+
+from ....modeling_utils import PreTrainedModel
+from ....utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_trajectory_transformer import TrajectoryTransformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
+_CONFIG_FOR_DOC = "TrajectoryTransformerConfig"
+
+TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "CarlCochet/trajectory-transformer-halfcheetah-medium-v2",
+ # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
+]
+
+
+def load_tf_weights_in_trajectory_transformer(model, config, tf_checkpoint_path):
+ """Load tf checkpoints in a pytorch model."""
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array)
+
+ for name, array in zip(names, arrays):
+ name = name.split("/")
+        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
+ # which are not required for using pretrained model
+ if any(
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+ for n in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ pointer = model
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+ scope_names = re.split(r"_(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "squad":
+ pointer = getattr(pointer, "classifier")
+ else:
+ try:
+ pointer = getattr(pointer, scope_names[0])
+ except AttributeError:
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+ if m_name[-11:] == "_embeddings":
+ pointer = getattr(pointer, "weight")
+ elif m_name == "kernel":
+ array = np.transpose(array)
+            # the former try/except AssertionError wrapper was dead code: a ValueError was
+            # raised inside it, so the AssertionError handler could never run
+            if pointer.shape != array.shape:
+                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+@dataclass
+class TrajectoryTransformerOutput(ModelOutput):
+ """
+ Base class for model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
+ sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the
+ attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. GPT2Attentions weights after the attention softmax, used to compute the weighted average
+ in the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class TrajectoryTransformerPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TrajectoryTransformerConfig
+ load_tf_weights = load_tf_weights_in_trajectory_transformer
+ base_model_prefix = "trajectory_transformer"
+ main_input_name = "trajectories"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ if isinstance(module, (nn.Linear, nn.Embedding)):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if isinstance(module, nn.Linear) and module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, EinLinear):
+ for i in range(module.n_models):
+ nn.init.kaiming_uniform_(module.weight[i], a=math.sqrt(5) / self.config.kaiming_initializer_range)
+ if module.bias is not None:
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight[i])
+ bound = (1 / math.sqrt(fan_in)) * self.config.initializer_range
+ nn.init.uniform_(module.bias[i], -bound, bound)
+
+
+TRAJECTORY_TRANSFORMER_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`TrajectoryTransformerConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ trajectories (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Batch of trajectories, where a trajectory is a sequence of states, actions and rewards.
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`, *optional*):
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
+ targets (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Desired targets used to compute the loss.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class EinLinear(nn.Module):
+ def __init__(self, n_models, in_features, out_features, bias):
+ super().__init__()
+ self.n_models = n_models
+ self.out_features = out_features
+ self.in_features = in_features
+ self.weight = nn.Parameter(torch.Tensor(n_models, out_features, in_features))
+ if bias:
+ self.bias = nn.Parameter(torch.Tensor(n_models, out_features))
+ else:
+ self.register_parameter("bias", None)
+
+ def reset_parameters(self):
+ for i in range(self.n_models):
+ nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5))
+ if self.bias is not None:
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i])
+ bound = 1 / math.sqrt(fan_in)
+ nn.init.uniform_(self.bias[i], -bound, bound)
+
+ def forward(self, input):
+ """
+ Args:
+ input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`):
+ The input to the layer.
+ """
+ # [ batch_size x n_models x output_dim ]
+ output = torch.einsum("eoi,bei->beo", self.weight, input)
+        if self.bias is not None:
+            raise RuntimeError("bias addition is not implemented in EinLinear.forward")
+ return output
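+
+    # Hedged shape note for EinLinear: weight is (n_models, out_features, in_features)
+    # and input is (batch, n_models, in_features); einsum "eoi,bei->beo" applies model
+    # e's matrix to input[:, e, :], giving an output of shape (batch, n_models, out_features).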
+
+
+class CausalSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ if config.n_embd % config.n_head != 0:
+ raise ValueError(f"n_head ({config.n_head}) should be a divisor of n_embd ({config.n_embd})")
+
+ # key, query, value projections for all heads
+ self.key = nn.Linear(config.n_embd, config.n_embd)
+ self.query = nn.Linear(config.n_embd, config.n_embd)
+ self.value = nn.Linear(config.n_embd, config.n_embd)
+
+ # regularization
+ self.attn_drop = nn.Dropout(config.attn_pdrop)
+ self.resid_drop = nn.Dropout(config.resid_pdrop)
+
+ # output projection
+ self.proj = nn.Linear(config.n_embd, config.n_embd)
+
+ # causal mask to ensure that attention is only applied to the left in the input sequence
+ self.register_buffer(
+ "mask",
+ torch.tril(torch.ones(config.block_size, config.block_size)).view(
+ 1, 1, config.block_size, config.block_size
+ ),
+ persistent=False,
+ )
+
+ # mask previous value estimates
+ joined_dim = config.observation_dim + config.action_dim + 2
+ self.mask.squeeze()[:, joined_dim - 1 :: joined_dim] = 0
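+        # editor note: joined_dim spans one transition (observation_dim + action_dim
+        # plus two slots for reward and value); zeroing every (joined_dim - 1)-th column
+        # keeps attention from ever looking at earlier value estimates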
+
+ self.n_head = config.n_head
+
+ def forward(
+ self,
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ):
+ batch_size, sequence_length, embedding_dim = hidden_states.size()
+
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+ # [ batch_size x n_heads x sequence_length x head_dim ]
+ key = (
+ self.key(hidden_states)
+ .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
+ .transpose(1, 2)
+ )
+ query = (
+ self.query(hidden_states)
+ .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
+ .transpose(1, 2)
+ )
+ value = (
+ self.value(hidden_states)
+ .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
+ .transpose(1, 2)
+ )
+
+ if layer_past is not None:
+ past_key, past_value = layer_past
+ key = torch.cat((past_key, key), dim=-2)
+ value = torch.cat((past_value, value), dim=-2)
+
+ if use_cache is True:
+ present = (key, value)
+ else:
+ present = None
+
+ # causal self-attention
+ # [ batch_size x n_heads x sequence_length x sequence_length ]
+ attn_weights = (torch.matmul(query, key.transpose(-2, -1))) * (1.0 / math.sqrt(key.size(-1)))
+ attn_weights = attn_weights.masked_fill(
+ self.mask[:, :, :sequence_length, :sequence_length] == 0, torch.finfo(attn_weights.dtype).min
+ )
+ attn_weights = F.softmax(attn_weights, dim=-1)
+ self._attn_map = attn_weights.clone()
+ attn_weights = self.attn_drop(attn_weights)
+
+ output = torch.matmul(attn_weights, value)
+ # [ batch_size x sequence_length x embedding_dim ]
+ # re-assemble all head outputs side by side
+ output = output.transpose(1, 2).contiguous().view(batch_size, sequence_length, embedding_dim)
+
+ # output projection
+ output = self.resid_drop(self.proj(output))
+
+ outputs = (output, present)
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class Block(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.ln1 = nn.LayerNorm(config.n_embd)
+ self.ln2 = nn.LayerNorm(config.n_embd)
+ self.attn = CausalSelfAttention(config)
+
+ # MLP
+ self.l1 = nn.Linear(config.n_embd, 4 * config.n_embd)
+ self.act = nn.GELU()
+ self.l2 = nn.Linear(4 * config.n_embd, config.n_embd)
+ self.drop = nn.Dropout(config.resid_pdrop)
+
+ def forward(
+ self,
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ):
+ residual = hidden_states
+ hidden_states = self.ln1(hidden_states)
+
+ attn_outputs = self.attn(
+ hidden_states, layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions
+ )
+ attn_output = attn_outputs[0]
+ outputs = attn_outputs[1:]
+ hidden_states = attn_output + residual
+
+ residual = hidden_states
+ hidden_states = self.ln2(hidden_states)
+ hidden_states = self.l1(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.l2(hidden_states)
+ hidden_states = residual + self.drop(hidden_states)
+
+ if use_cache:
+ outputs = (hidden_states,) + outputs
+ else:
+ outputs = (hidden_states,) + outputs[1:]
+
+ return outputs
+
+
+@add_start_docstrings(
+ "The bare TrajectoryTransformer Model transformer outputting raw hidden-states without any specific head on top.",
+ TRAJECTORY_TRANSFORMER_START_DOCSTRING,
+)
+class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel):
+ """the full GPT language model, with a context size of block_size"""
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ # input embedding stem (+1 for stop token)
+ self.tok_emb = nn.Embedding(config.vocab_size * config.transition_dim + 1, config.n_embd)
+
+ self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
+ self.drop = nn.Dropout(config.embd_pdrop)
+ # transformer
+ self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])
+ # decoder head
+ self.ln_f = nn.LayerNorm(config.n_embd)
+ self.head = EinLinear(config.transition_dim, config.n_embd, config.vocab_size + 1, bias=False)
+
+ self.vocab_size = config.vocab_size
+ self.stop_token = config.vocab_size * config.transition_dim
+ self.block_size = config.block_size
+
+ self.observation_dim = config.observation_dim
+ self.action_dim = config.action_dim
+ self.transition_dim = config.transition_dim
+ self.embedding_dim = config.n_embd
+
+ self.action_weight = config.action_weight
+ self.reward_weight = config.reward_weight
+ self.value_weight = config.value_weight
+
+ self.gradient_checkpointing = False
+
+ self.post_init()
+
+ def get_block_size(self):
+ return self.block_size
+
+ def offset_tokens(self, trajectories):
+ _, sequence_length = trajectories.shape
+
+ n_states = int(np.ceil(sequence_length / self.transition_dim))
+
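+ # each position within a transition gets its own disjoint band of vocab_size token ids,
+ # so an offset id encodes both the discretized value and its slot within the transition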
+ offsets = torch.arange(self.transition_dim) * self.vocab_size
+ offsets = offsets.repeat(n_states).to(trajectories.device)
+
+ offset_trajectories = trajectories + offsets[:sequence_length]
+ offset_trajectories[trajectories == self.vocab_size] = self.stop_token
+ return offset_trajectories
+
+ def pad_to_full_observation(self, hidden_states):
+ batch_size, sequence_length, _ = hidden_states.shape
+
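+ # pad so the flattened sequence covers a whole number of transitions; the outer modulo
+ # makes n_pad zero when sequence_length is already a multiple of transition_dim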
+ n_pad = (self.transition_dim - sequence_length % self.transition_dim) % self.transition_dim
+ padding = torch.zeros(batch_size, n_pad, self.embedding_dim, device=hidden_states.device)
+
+ # [ batch_size x padded_sequence_length' x embedding_dim ]
+ hidden_states_pad = torch.cat([hidden_states, padding], dim=1)
+ hidden_states_pad = hidden_states_pad.view(-1, self.transition_dim, self.embedding_dim)
+
+ return hidden_states_pad, n_pad
+
+ @add_start_docstrings_to_model_forward(
+ TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")
+ )
+ @replace_return_docstrings(output_type=TrajectoryTransformerOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ trajectories: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ targets: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TrajectoryTransformerOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import TrajectoryTransformerModel
+ >>> import numpy as np
+ >>> import torch
+
+ >>> model = TrajectoryTransformerModel.from_pretrained(
+ ... "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
+ ... )
+ >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ >>> model.to(device)
+ >>> model.eval()
+
+ >>> observations_dim, action_dim, batch_size = 17, 6, 256
+ >>> seq_length = observations_dim + action_dim + 1
+
+ >>> trajectories = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(
+ ... device
+ ... )
+ >>> targets = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(device)
+
+ >>> outputs = model(
+ ... trajectories,
+ ... targets=targets,
+ ... use_cache=True,
+ ... output_attentions=True,
+ ... output_hidden_states=True,
+ ... return_dict=True,
+ ... )
+ ```
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ if past_key_values is None:
+ past_key_values = tuple([None] * len(self.blocks))
+
+ batch_size, sequence_length = trajectories.size()
+
+ if sequence_length > self.block_size:
+ raise ValueError("Cannot forward, model block size is exhausted.")
+
+ offset_trajectories = self.offset_tokens(trajectories)
+ # [ batch_size x sequence_length x embedding_dim ]
+ # forward the GPT model
+ token_embeddings = self.tok_emb(offset_trajectories) # each index maps to a (learnable) vector
+ position_embeddings = self.pos_emb[:, :sequence_length, :] # each position maps to a (learnable) vector
+
+ hidden_states = self.drop(token_embeddings + position_embeddings)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ presents = () if use_cache else None
+ all_self_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, (block, layer_past) in enumerate(zip(self.blocks, past_key_values)):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ outputs = self._gradient_checkpointing_func(
+ block.__call__,
+ hidden_states,
+ layer_past,
+ use_cache,
+ output_attentions,
+ )
+ else:
+ outputs = block(hidden_states, layer_past, use_cache, output_attentions)
+
+ hidden_states = outputs[0]
+ if use_cache is True:
+ presents = presents + (outputs[1],)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+
+ # [ batch_size x sequence_length x embedding_dim ]
+ hidden_state = self.ln_f(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ hidden_states_pad, n_pad = self.pad_to_full_observation(hidden_state)
+
+ logits = self.head(hidden_states_pad)
+ logits = logits.reshape(batch_size, sequence_length + n_pad, self.vocab_size + 1)
+ logits = logits[:, :sequence_length]
+
+ # if we are given some desired targets also calculate the loss
+ if targets is not None:
+ loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1), reduction="none")
+ if self.action_weight != 1 or self.reward_weight != 1 or self.value_weight != 1:
+ # make weights
+ n_states = int(np.ceil(sequence_length / self.transition_dim))
+ weights = torch.cat(
+ [
+ torch.ones(self.observation_dim, device=trajectories.device),
+ torch.ones(self.action_dim, device=trajectories.device) * self.action_weight,
+ torch.ones(1, device=trajectories.device) * self.reward_weight,
+ torch.ones(1, device=trajectories.device) * self.value_weight,
+ ]
+ )
+ weights = weights.repeat(n_states)
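+ # drop the first position: the targets are assumed to be the input tokens shifted one
+ # step to the left, so the per-slot weight pattern shifts accordingly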
+ weights = weights[1:].repeat(batch_size, 1)
+ loss = loss * weights.view(-1)
+ loss = (loss * attention_mask.view(-1)).mean()
+ else:
+ loss = None
+
+ if not return_dict:
+ return tuple(v for v in [loss, logits, presents, all_hidden_states, all_self_attentions] if v is not None)
+
+ return TrajectoryTransformerOutput(
+ loss=loss,
+ logits=logits,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3674e19665ca74e1e6ee3ac92ca812e54580007
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py
@@ -0,0 +1,97 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
+ "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_transfo_xl"] = [
+ "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "AdaptiveEmbedding",
+ "TransfoXLForSequenceClassification",
+ "TransfoXLLMHeadModel",
+ "TransfoXLModel",
+ "TransfoXLPreTrainedModel",
+ "load_tf_weights_in_transfo_xl",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_transfo_xl"] = [
+ "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFAdaptiveEmbedding",
+ "TFTransfoXLForSequenceClassification",
+ "TFTransfoXLLMHeadModel",
+ "TFTransfoXLMainLayer",
+ "TFTransfoXLModel",
+ "TFTransfoXLPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
+ from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_transfo_xl import (
+ TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
+ AdaptiveEmbedding,
+ TransfoXLForSequenceClassification,
+ TransfoXLLMHeadModel,
+ TransfoXLModel,
+ TransfoXLPreTrainedModel,
+ load_tf_weights_in_transfo_xl,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_transfo_xl import (
+ TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFAdaptiveEmbedding,
+ TFTransfoXLForSequenceClassification,
+ TFTransfoXLLMHeadModel,
+ TFTransfoXLMainLayer,
+ TFTransfoXLModel,
+ TFTransfoXLPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1392cbf1b7a19ee9c0626413edffab9266af3a69
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef1e758e0698cff477e65eb29d3f58ba7241101b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..679c223e3bd726f2baba7fa21b88a94179d3e6b5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a00406085a4607e1d8af9dee3bb289d3c735f702
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..95d1e7e9b10544851f669992fd37dee2c6089f94
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc8f6b6a52ed42af3264d68284aed70f8ff53ecc
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df30fb4f64fdb351d66bcbf21b1dc4803e454ac6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b578e0e9834a9da59075bffbf3cf1f0408a42568
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7d5f2f87fb1ade361a6867f5505789d934e6ba4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py
@@ -0,0 +1,190 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Transformer XL configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "transfo-xl/transfo-xl-wt103": "https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/config.json",
+}
+
+
+class TransfoXLConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`TransfoXLModel`] or a [`TFTransfoXLModel`]. It is
+ used to instantiate a Transformer-XL model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the TransfoXL
+ [transfo-xl/transfo-xl-wt103](https://huggingface.co/transfo-xl/transfo-xl-wt103) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 267735):
+ Vocabulary size of the Transformer-XL model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`TransfoXLModel`] or [`TFTransfoXLModel`].
+ cutoffs (`List[int]`, *optional*, defaults to `[20000, 40000, 200000]`):
+ Cutoffs for the adaptive softmax.
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimensionality of the model's hidden states.
+ d_embed (`int`, *optional*, defaults to 1024):
+ Dimensionality of the embeddings.
+ n_head (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ d_head (`int`, *optional*, defaults to 64):
+ Dimensionality of the model's heads.
+ d_inner (`int`, *optional*, defaults to 4096):
+ Inner dimension of the feed-forward (FF) layers.
+ div_val (`int`, *optional*, defaults to 4):
+ Divisor value for the adaptive input and softmax.
+ pre_lnorm (`boolean`, *optional*, defaults to `False`):
+ Whether or not to apply LayerNorm to the input instead of the output in the blocks.
+ n_layer (`int`, *optional*, defaults to 18):
+ Number of hidden layers in the Transformer encoder.
+ mem_len (`int`, *optional*, defaults to 1600):
+ Length of the retained previous hidden states (the memory).
+ clamp_len (`int`, *optional*, defaults to 1000):
+ Use the same positional embeddings for all positions beyond `clamp_len`.
+ same_length (`boolean`, *optional*, defaults to `True`):
+ Whether or not to use the same attention length for all tokens.
+ proj_share_all_but_first (`boolean`, *optional*, defaults to `True`):
+ Whether or not to share all projection matrices except the first one.
+ attn_type (`int`, *optional*, defaults to 0):
+ Attention type. 0 for Transformer-XL, 1 for Shaw et al., 2 for Vaswani et al., 3 for Al Rfou et al.
+ sample_softmax (`int`, *optional*, defaults to -1):
+ Number of samples in the sampled softmax.
+ adaptive (`boolean`, *optional*, defaults to `True`):
+ Whether or not to use adaptive softmax.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ dropatt (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ untie_r (`boolean`, *optional*, defaults to `True`):
+ Whether or not to untie the relative position biases.
+ init (`str`, *optional*, defaults to `"normal"`):
+ Parameter initializer to use.
+ init_range (`float`, *optional*, defaults to 0.01):
+ Parameters initialized by U(-init_range, init_range).
+ proj_init_std (`float`, *optional*, defaults to 0.01):
+ Parameters initialized by N(0, proj_init_std).
+ init_std (`float`, *optional*, defaults to 0.02):
+ Parameters initialized by N(0, init_std).
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+ The epsilon to use in the layer normalization layers.
+ eos_token_id (`int`, *optional*, defaults to 0):
+ End of stream token id.
+
+ Examples:
+
+ ```python
+ >>> from transformers import TransfoXLConfig, TransfoXLModel
+
+ >>> # Initializing a Transformer XL configuration
+ >>> configuration = TransfoXLConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = TransfoXLModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "transfo-xl"
+ keys_to_ignore_at_inference = ["mems"]
+ attribute_map = {
+ "n_token": "vocab_size",
+ "hidden_size": "d_model",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=267735,
+ cutoffs=[20000, 40000, 200000],
+ d_model=1024,
+ d_embed=1024,
+ n_head=16,
+ d_head=64,
+ d_inner=4096,
+ div_val=4,
+ pre_lnorm=False,
+ n_layer=18,
+ mem_len=1600,
+ clamp_len=1000,
+ same_length=True,
+ proj_share_all_but_first=True,
+ attn_type=0,
+ sample_softmax=-1,
+ adaptive=True,
+ dropout=0.1,
+ dropatt=0.0,
+ untie_r=True,
+ init="normal",
+ init_range=0.01,
+ proj_init_std=0.01,
+ init_std=0.02,
+ layer_norm_epsilon=1e-5,
+ eos_token_id=0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.cutoffs = list(cutoffs)
+ # the first projection is never tied; the rest follow `proj_share_all_but_first`
+ self.tie_projs = [False] + [proj_share_all_but_first] * len(self.cutoffs)
+ self.d_model = d_model
+ self.d_embed = d_embed
+ self.d_head = d_head
+ self.d_inner = d_inner
+ self.div_val = div_val
+ self.pre_lnorm = pre_lnorm
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.mem_len = mem_len
+ self.same_length = same_length
+ self.attn_type = attn_type
+ self.clamp_len = clamp_len
+ self.sample_softmax = sample_softmax
+ self.adaptive = adaptive
+ self.dropout = dropout
+ self.dropatt = dropatt
+ self.untie_r = untie_r
+ self.init = init
+ self.init_range = init_range
+ self.proj_init_std = proj_init_std
+ self.init_std = init_std
+ self.layer_norm_epsilon = layer_norm_epsilon
+ super().__init__(eos_token_id=eos_token_id, **kwargs)
+
+ @property
+ def max_position_embeddings(self):
+ # Message copied from Transformer-XL documentation
+ logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
+ return -1
+
+ @max_position_embeddings.setter
+ def max_position_embeddings(self, value):
+ # Message copied from Transformer-XL documentation
+ raise NotImplementedError(
+ f"The model {self.model_type} is one of the few models that has no sequence length limit."
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2693ac333b84b08769eb15a13a26dcf1a547267
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,121 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Transformer XL checkpoint and datasets."""
+
+
+import argparse
+import os
+import pickle
+import sys
+
+import torch
+
+from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
+from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils
+from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
+from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
+
+
+logging.set_verbosity_info()
+
+# We do this to be able to load Python 2 dataset pickles
+# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
+data_utils.Vocab = data_utils.TransfoXLTokenizer
+data_utils.Corpus = data_utils.TransfoXLCorpus
+sys.modules["data_utils"] = data_utils
+sys.modules["vocabulary"] = data_utils
+
+
+def convert_transfo_xl_checkpoint_to_pytorch(
+ tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
+):
+ if transfo_xl_dataset_file:
+ # Convert a pre-processed corpus (see original TensorFlow repo)
+ with open(transfo_xl_dataset_file, "rb") as fp:
+ corpus = pickle.load(fp, encoding="latin1")
+ # Save vocabulary and dataset cache as dictionaries (more robust than pickles in the long term)
+ pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
+ print(f"Save vocabulary to {pytorch_vocab_dump_path}")
+ corpus_vocab_dict = corpus.vocab.__dict__
+ torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
+
+ corpus_dict_no_vocab = corpus.__dict__
+ corpus_dict_no_vocab.pop("vocab", None)
+ pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
+ print(f"Save dataset to {pytorch_dataset_dump_path}")
+ torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
+
+ if tf_checkpoint_path:
+ # Convert a pre-trained TensorFlow model
+ config_path = os.path.abspath(transfo_xl_config_file)
+ tf_path = os.path.abspath(tf_checkpoint_path)
+
+ print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
+ # Initialise PyTorch model
+ if transfo_xl_config_file == "":
+ config = TransfoXLConfig()
+ else:
+ config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ model = TransfoXLLMHeadModel(config)
+
+ model = load_tf_weights_in_transfo_xl(model, config, tf_path)
+ # Save pytorch-model
+ pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
+ pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
+ print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
+ print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
+ f.write(config.to_json_string())
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the folder to store the PyTorch model or dataset/vocab.",
+ )
+ parser.add_argument(
+ "--tf_checkpoint_path",
+ default="",
+ type=str,
+ help="An optional path to a TensorFlow checkpoint path to be converted.",
+ )
+ parser.add_argument(
+ "--transfo_xl_config_file",
+ default="",
+ type=str,
+ help=(
+ "An optional config json file corresponding to the pre-trained BERT model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument(
+ "--transfo_xl_dataset_file",
+ default="",
+ type=str,
+ help="An optional dataset file to be converted in a vocabulary.",
+ )
+ args = parser.parse_args()
+ convert_transfo_xl_checkpoint_to_pytorch(
+ args.tf_checkpoint_path,
+ args.transfo_xl_config_file,
+ args.pytorch_dump_folder_path,
+ args.transfo_xl_dataset_file,
+ )
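+
+
+# Example invocation (hypothetical paths, shown for illustration only):
+# python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
+#     --tf_checkpoint_path /path/to/tf_checkpoint \
+#     --transfo_xl_config_file /path/to/config.json \
+#     --pytorch_dump_folder_path /path/to/output_dir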
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab2725df0c4dcf5563e4379d79af426a0e7e99ed
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
@@ -0,0 +1,1124 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ TF 2.0 Transformer XL model.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ....modeling_tf_utils import (
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ....tf_utils import shape_list, stable_softmax
+from ....utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_transfo_xl import TransfoXLConfig
+from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
+_CONFIG_FOR_DOC = "TransfoXLConfig"
+
+TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "transfo-xl/transfo-xl-wt103",
+ # See all Transformer XL models at https://huggingface.co/models?filter=transfo-xl
+]
+
+
+class TFPositionalEmbedding(keras.layers.Layer):
+ def __init__(self, demb, **kwargs):
+ super().__init__(**kwargs)
+
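+ # standard sinusoidal frequencies 1 / 10000^(2i / demb), one per pair of embedding dims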
+ self.inv_freq = 1 / (10000 ** (tf.range(0, demb, 2.0) / demb))
+
+ def call(self, pos_seq, bsz=None):
+ self.inv_freq = tf.cast(self.inv_freq, dtype=pos_seq.dtype)
+ sinusoid_inp = tf.einsum("i,j->ij", pos_seq, self.inv_freq)
+ pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
+
+ if bsz is not None:
+ return tf.tile(pos_emb[:, None, :], [1, bsz, 1])
+ else:
+ return pos_emb[:, None, :]
+
+
+class TFPositionwiseFF(keras.layers.Layer):
+ def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs):
+ super().__init__(**kwargs)
+
+ self.d_model = d_model
+ self.d_inner = d_inner
+ self.dropout = dropout
+
+ self.layer_1 = keras.layers.Dense(
+ d_inner, kernel_initializer=get_initializer(init_std), activation=tf.nn.relu, name="CoreNet_._0"
+ )
+ self.drop_1 = keras.layers.Dropout(dropout)
+ self.layer_2 = keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), name="CoreNet_._3")
+ self.drop_2 = keras.layers.Dropout(dropout)
+
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
+
+ self.pre_lnorm = pre_lnorm
+
+ def call(self, inp, training=False):
+ if self.pre_lnorm:
+ # layer normalization + positionwise feed-forward
+ core_out = self.layer_norm(inp)
+ core_out = self.layer_1(core_out)
+ core_out = self.drop_1(core_out, training=training)
+ core_out = self.layer_2(core_out)
+ core_out = self.drop_2(core_out, training=training)
+
+ # residual connection
+ output = core_out + inp
+ else:
+ # positionwise feed-forward
+ core_out = self.layer_1(inp)
+ core_out = self.drop_1(core_out, training=training)
+ core_out = self.layer_2(core_out)
+ core_out = self.drop_2(core_out, training=training)
+
+ # residual connection + layer normalization
+ output = self.layer_norm(inp + core_out)
+
+ return output
+
+
+class TFRelPartialLearnableMultiHeadAttn(keras.layers.Layer):
+ def __init__(
+ self,
+ n_head,
+ d_model,
+ d_head,
+ dropout,
+ dropatt=0.0,
+ pre_lnorm=False,
+ r_r_bias=None,
+ r_w_bias=None,
+ layer_norm_epsilon=1e-5,
+ init_std=0.02,
+ output_attentions=False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.n_head = n_head
+ self.d_model = d_model
+ self.d_head = d_head
+ self.dropout = dropout
+ self.output_attentions = output_attentions
+
+ self.qkv_net = keras.layers.Dense(
+ 3 * n_head * d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="qkv_net"
+ )
+
+ self.drop = keras.layers.Dropout(dropout)
+ self.dropatt = keras.layers.Dropout(dropatt)
+ self.o_net = keras.layers.Dense(
+ d_model, kernel_initializer=get_initializer(init_std), use_bias=False, name="o_net"
+ )
+
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
+
+ self.scale = 1 / (d_head**0.5)
+
+ self.pre_lnorm = pre_lnorm
+
+ if r_r_bias is not None and r_w_bias is not None: # Biases are shared
+ self.r_r_bias = r_r_bias
+ self.r_w_bias = r_w_bias
+ else:
+ self.r_r_bias = None
+ self.r_w_bias = None
+
+ self.r_net = keras.layers.Dense(
+ self.n_head * self.d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="r_net"
+ )
+
+ def build(self, input_shape):
+ if self.r_r_bias is None or self.r_w_bias is None: # Biases are not shared
+ self.r_r_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
+ )
+ self.r_w_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
+ )
+ super().build(input_shape)
+
+ def _rel_shift(self, x):
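+ # relative-shift trick: pad a column of zeros, reshape so the padding staggers the rows,
+ # then slice it away; this realigns (query, relative-position) scores to (query, key)
+ # indexing without an explicit gather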
+ x_size = shape_list(x)
+
+ x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
+ x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]])
+ x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
+ x = tf.reshape(x, x_size)
+
+ return x
+
+ def call(self, w, r, attn_mask, mems, head_mask, output_attentions, training=False):
+ qlen, rlen, bsz = shape_list(w)[0], shape_list(r)[0], shape_list(w)[1]
+
+ if mems is not None:
+ mems = tf.cast(mems, dtype=w.dtype)
+ cat = tf.concat([mems, w], 0)
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(cat))
+ else:
+ w_heads = self.qkv_net(cat)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
+ w_head_q = w_head_q[-qlen:]
+ else:
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(w))
+ else:
+ w_heads = self.qkv_net(w)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
+
+ klen = shape_list(w_head_k)[0]
+
+ w_head_q = tf.reshape(w_head_q, (qlen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head
+ w_head_k = tf.reshape(w_head_k, (klen, bsz, self.n_head, self.d_head)) # klen x bsz x n_head x d_head
+ w_head_v = tf.reshape(w_head_v, (klen, bsz, self.n_head, self.d_head)) # klen x bsz x n_head x d_head
+
+ r_head_k = tf.reshape(r_head_k, (rlen, self.n_head, self.d_head)) # rlen x n_head x d_head
+
+ # compute attention score
+ rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
+ AC = tf.einsum("ibnd,jbnd->ijbn", rw_head_q, w_head_k) # qlen x klen x bsz x n_head
+
+ rr_head_q = w_head_q + self.r_r_bias
+ BD = tf.einsum("ibnd,jnd->ijbn", rr_head_q, r_head_k) # qlen x klen x bsz x n_head
+ BD = self._rel_shift(BD)
+
+ # [qlen x klen x bsz x n_head]
+ attn_score = AC + BD
+ attn_score = attn_score * self.scale
+
+ # compute attention probability
+ if attn_mask is not None:
+ attn_mask_t = attn_mask[:, :, None, None]
+ attn_mask_t = tf.cast(attn_mask_t, dtype=attn_score.dtype)
+ attn_score = attn_score * (1.0 - attn_mask_t) - 1e30 * attn_mask_t
+
+ # [qlen x klen x bsz x n_head]
+ attn_prob = stable_softmax(attn_score, axis=1)
+ attn_prob = self.dropatt(attn_prob, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_prob = attn_prob * head_mask
+
+ # compute attention vector
+ attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, w_head_v)
+
+ # [qlen x bsz x n_head x d_head]
+ attn_vec_sizes = shape_list(attn_vec)
+ attn_vec = tf.reshape(attn_vec, (attn_vec_sizes[0], attn_vec_sizes[1], self.n_head * self.d_head))
+
+ # linear projection
+ attn_out = self.o_net(attn_vec)
+ attn_out = self.drop(attn_out, training=training)
+
+ if self.pre_lnorm:
+ # residual connection
+ outputs = [w + attn_out]
+ else:
+ # residual connection + layer normalization
+ outputs = [self.layer_norm(w + attn_out)]
+
+ if output_attentions:
+ outputs.append(attn_prob)
+
+ return outputs
+
+
+class TFRelPartialLearnableDecoderLayer(keras.layers.Layer):
+ def __init__(
+ self,
+ n_head,
+ d_model,
+ d_head,
+ d_inner,
+ dropout,
+ dropatt=0.0,
+ pre_lnorm=False,
+ r_w_bias=None,
+ r_r_bias=None,
+ layer_norm_epsilon=1e-5,
+ init_std=0.02,
+ output_attentions=False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.dec_attn = TFRelPartialLearnableMultiHeadAttn(
+ n_head,
+ d_model,
+ d_head,
+ dropout,
+ dropatt=dropatt,
+ pre_lnorm=pre_lnorm,
+ r_w_bias=r_w_bias,
+ r_r_bias=r_r_bias,
+ init_std=init_std,
+ layer_norm_epsilon=layer_norm_epsilon,
+ output_attentions=output_attentions,
+ name="dec_attn",
+ )
+ self.pos_ff = TFPositionwiseFF(
+ d_model,
+ d_inner,
+ dropout,
+ pre_lnorm=pre_lnorm,
+ init_std=init_std,
+ layer_norm_epsilon=layer_norm_epsilon,
+ name="pos_ff",
+ )
+
+ def call(self, dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=False):
+ attn_outputs = self.dec_attn(dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=training)
+ ff_output = self.pos_ff(attn_outputs[0], training=training)
+
+ outputs = [ff_output] + attn_outputs[1:]
+
+ return outputs
+
+
+class TFTransfoEmbeddings(keras.layers.Layer):
+ def __init__(self, vocab_size, emb_size, init_std, **kwargs):
+ super().__init__(**kwargs)
+
+ self.vocab_size = vocab_size
+ self.emb_size = emb_size
+ self.init_std = init_std
+
+ def build(self, input_shape):
+ self.weight = self.add_weight(
+ shape=(self.vocab_size, self.emb_size),
+ initializer=get_initializer(self.init_std),
+ name="embeddings",
+ )
+
+ super().build(input_shape)
+
+ def call(self, inputs):
+ return tf.gather(self.weight, inputs)
+
+
+class TFAdaptiveEmbedding(keras.layers.Layer):
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, init_std=0.02, sample_softmax=False, **kwargs):
+ super().__init__(**kwargs)
+
+ self.n_token = n_token
+ self.d_embed = d_embed
+ self.init_std = init_std
+
+ self.cutoffs = cutoffs + [n_token]
+ self.div_val = div_val
+ self.d_proj = d_proj
+
+ self.emb_scale = d_proj**0.5
+
+ self.cutoff_ends = [0] + self.cutoffs
+
+ self.emb_layers = []
+ self.emb_projs = []
+
+ if div_val == 1:
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = d_embed // (div_val**i)
+ self.emb_layers.append(
+ TFTransfoEmbeddings(
+ r_idx - l_idx,
+ d_emb_i,
+ init_std,
+ name=f"emb_layers_._{i}",
+ )
+ )
+
+ def build(self, input_shape):
+ for i in range(len(self.cutoffs)):
+ d_emb_i = self.d_embed // (self.div_val**i)
+ self.emb_projs.append(
+ self.add_weight(
+ shape=(d_emb_i, self.d_proj),
+ initializer=get_initializer(self.init_std),
+ trainable=True,
+ name=f"emb_projs_._{i}",
+ )
+ )
+
+ super().build(input_shape)
+
+ def call(self, inp):
+ if self.div_val == 1:
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+ else:
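+ # adaptive embedding: tokens are bucketed by the frequency cutoffs, each bucket uses a
+ # smaller embedding table (d_embed // div_val**i) projected up to d_proj, and scatter_nd
+ # reassembles the per-bucket embeddings into a single flat tensor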
+ inp_flat = tf.reshape(inp, (-1,))
+ emb_flat = tf.zeros([shape_list(inp_flat)[0], self.d_proj])
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+
+ mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
+
+ inp_i = tf.boolean_mask(inp_flat, mask_i) - l_idx
+ emb_i = self.emb_layers[i](inp_i)
+ emb_i = tf.einsum("id,de->ie", emb_i, self.emb_projs[i])
+
+ mask_idx = tf.where(mask_i)
+ scatter = tf.scatter_nd(mask_idx, emb_i, shape_list(emb_flat))
+ emb_flat = tf.cast(emb_flat, dtype=scatter.dtype)
+ emb_flat += scatter
+
+ embed_shape = shape_list(inp) + [self.d_proj]
+ embed = tf.reshape(emb_flat, embed_shape)
+
+ embed *= self.emb_scale
+
+ return embed
+
+
+@keras_serializable
+class TFTransfoXLMainLayer(keras.layers.Layer):
+ config_class = TransfoXLConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+ self.return_dict = config.use_return_dict
+
+ self.n_token = config.vocab_size
+
+ self.d_embed = config.d_embed
+ self.d_model = config.d_model
+ self.n_head = config.n_head
+ self.d_head = config.d_head
+ self.untie_r = config.untie_r
+
+ self.word_emb = TFAdaptiveEmbedding(
+ config.vocab_size,
+ config.d_embed,
+ config.d_model,
+ config.cutoffs,
+ div_val=config.div_val,
+ init_std=config.init_std,
+ name="word_emb",
+ )
+
+ self.drop = keras.layers.Dropout(config.dropout)
+
+ self.n_layer = config.n_layer
+ self.mem_len = config.mem_len
+ self.attn_type = config.attn_type
+
+ self.layers = []
+ if config.attn_type == 0: # the default attention
+ for i in range(config.n_layer):
+ self.layers.append(
+ TFRelPartialLearnableDecoderLayer(
+ config.n_head,
+ config.d_model,
+ config.d_head,
+ config.d_inner,
+ config.dropout,
+ dropatt=config.dropatt,
+ pre_lnorm=config.pre_lnorm,
+ r_w_bias=None if self.untie_r else self.r_w_bias,
+ r_r_bias=None if self.untie_r else self.r_r_bias,
+ layer_norm_epsilon=config.layer_norm_epsilon,
+ init_std=config.init_std,
+ output_attentions=self.output_attentions,
+ name=f"layers_._{i}",
+ )
+ )
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ self.same_length = config.same_length
+ self.clamp_len = config.clamp_len
+
+ if self.attn_type == 0: # default attention
+ self.pos_emb = TFPositionalEmbedding(self.d_model, name="pos_emb")
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ def build(self, input_shape):
+ if not self.untie_r:
+ self.r_w_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
+ )
+ self.r_r_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
+ )
+ super().build(input_shape)
+
+ def get_input_embeddings(self):
+ return self.word_emb
+
+ def set_input_embeddings(self, value):
+ raise NotImplementedError
+
+ def backward_compatible(self):
+ self.sample_softmax = -1
+
+ def reset_memory_length(self, mem_len):
+ self.mem_len = mem_len
+
+ def _prune_heads(self, heads):
+ raise NotImplementedError
+
+ def init_mems(self, bsz):
+ if self.mem_len > 0:
+ mems = []
+ for i in range(self.n_layer):
+ empty = tf.zeros([self.mem_len, bsz, self.d_model])
+ mems.append(empty)
+
+ return mems
+ else:
+ return None
+
+ def _update_mems(self, hids, mems, mlen, qlen):
+ # does not deal with None
+ if mems is None:
+ return None
+
+ # mems is not None
+ assert len(hids) == len(mems), "len(hids) != len(mems)"
+
+ # There are `mlen + qlen` steps that can be cached into mems
+ new_mems = []
+ end_idx = mlen + tf.math.maximum(0, qlen)
+ beg_idx = tf.math.maximum(0, end_idx - tf.convert_to_tensor(self.mem_len))
+ for i in range(len(hids)):
+ mems[i] = tf.cast(mems[i], dtype=hids[i].dtype)
+ cat = tf.concat([mems[i], hids[i]], axis=0)
+ tf.stop_gradient(cat)
+ new_mems.append(cat[beg_idx:end_idx])
+
+ return new_mems
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ):
+ # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
+ # so we transpose here from shape [bsz, len] to shape [len, bsz]
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_ids = tf.transpose(input_ids, perm=(1, 0))
+ qlen, bsz = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))
+ qlen, bsz = shape_list(inputs_embeds)[:2]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if mems is None:
+ mems = self.init_mems(bsz)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.n_layer
+
+ if inputs_embeds is not None:
+ word_emb = inputs_embeds
+ else:
+ word_emb = self.word_emb(input_ids)
+
+ mlen = shape_list(mems[0])[0] if mems is not None else 0
+ klen = mlen + qlen
+
+ # Compute decoder attention mask
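+ # band_part(ones, -1, mlen) keeps keys up to mlen columns ahead of each query row, which
+ # (after offsetting by the mlen memory tokens) is exactly the causal, non-future region;
+ # upper_mask therefore marks the strictly-future key positions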
+ all_ones = tf.ones([qlen, klen], dtype=tf.int32)
+ upper_mask = 1 - tf.linalg.band_part(tf.ones([qlen, klen], dtype=tf.int32), -1, mlen)
+ if self.same_length:
+ mask_len = klen - self.mem_len
+ mask_shift_len = qlen - tf.nn.relu(mask_len) # Lazy clamping of negatives to zero
+
+ # Use an indicator variable instead of a conditional to keep the compiler happy
+ lower_mask = tf.linalg.band_part(all_ones, -1, 0) - (
+ tf.linalg.band_part(all_ones, mask_shift_len - 1, 0) * tf.cast(mask_shift_len != 0, tf.int32)
+ )
+ dec_attn_mask = upper_mask + lower_mask
+ else:
+ dec_attn_mask = upper_mask
+
+ hids = []
+ attentions = [] if output_attentions else None
+ if self.attn_type == 0: # default
+ pos_seq = tf.range(klen - 1, -1, -1.0)
+ if self.clamp_len > 0:
+ pos_seq = tf.minimum(pos_seq, self.clamp_len)
+ pos_emb = self.pos_emb(pos_seq)
+
+ core_out = self.drop(word_emb, training=training)
+ pos_emb = self.drop(pos_emb, training=training)
+
+ for i, layer in enumerate(self.layers):
+ hids.append(core_out)
+ mems_i = None if mems is None else mems[i]
+ layer_outputs = layer(
+ core_out,
+ pos_emb,
+ dec_attn_mask,
+ mems_i,
+ head_mask[i],
+ output_attentions,
+ training=training,
+ )
+ core_out = layer_outputs[0]
+ if output_attentions:
+ attentions.append(layer_outputs[1])
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ core_out = self.drop(core_out, training=training)
+
+ new_mems = self._update_mems(hids, mems, mlen, qlen)
+
+ # We transpose back here to shape [bsz, len, hidden_dim]
+ core_out = tf.transpose(core_out, perm=(1, 0, 2))
+
+ if output_hidden_states:
+ # Transpose to library standard shape [bsz, len, hidden_dim] and add last layer
+ hids = tuple(tf.transpose(t, perm=(1, 0, 2)) for t in hids)
+ hids = hids + (core_out,)
+ else:
+ hids = None
+ if output_attentions:
+ # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
+ attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)
+
+ if not return_dict:
+ return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)
+
+ return TFTransfoXLModelOutput(
+ last_hidden_state=core_out,
+ mems=new_mems,
+ hidden_states=hids,
+ attentions=attentions,
+ )
+
+
+class TFTransfoXLPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TransfoXLConfig
+ base_model_prefix = "transformer"
+
+
+@dataclass
+class TFTransfoXLModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: tf.Tensor = None
+ mems: List[tf.Tensor] = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+@dataclass
+class TFTransfoXLLMHeadModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ losses (`tf.Tensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):
+ Language modeling losses (not reduced).
+ prediction_scores (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ prediction_scores: tf.Tensor = None
+ mems: List[tf.Tensor] = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+@dataclass
+class TFTransfoXLSequenceClassifierOutputWithPast(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ mems: List[tf.Tensor] = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+TRANSFO_XL_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
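+
+ For instance, a minimal sketch of the three equivalent call styles (here `input_ids` and `mems` are
+ assumed to be tensors you have already built):
+
+ ```python
+ outputs = model(input_ids)  # a single tensor
+ outputs = model([input_ids, mems])  # a list, in the order given in the docstring
+ outputs = model({"input_ids": input_ids, "mems": mems})  # a dict keyed by input name
+ ```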
+
+
+
+ Parameters:
+ config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TRANSFO_XL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
+ given to this model should not be passed as `input_ids` as they have already been computed.
+ head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode; in graph mode the value will always be set to `True`.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTransfoXLModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ return_dict: bool | None = None,
+ training: bool = False,
+ ) -> TFTransfoXLModelOutput | Tuple[tf.Tensor]:
+ outputs = self.transformer(
+ input_ids=input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
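+
+# A minimal usage sketch (hedged: it assumes the "transfo-xl/transfo-xl-wt103" checkpoint
+# is available and that these classes are importable from the top-level `transformers` package):
+#
+#     from transformers import AutoTokenizer, TFTransfoXLModel
+#
+#     tokenizer = AutoTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
+#     model = TFTransfoXLModel.from_pretrained("transfo-xl/transfo-xl-wt103")
+#     inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+#     outputs = model(inputs)
+#     # Feeding `mems` back in extends the effective context across segments:
+#     outputs = model(inputs, mems=outputs.mems)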
+
+@add_start_docstrings(
+ """
+ The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
+ input embeddings)
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
+ self.sample_softmax = config.sample_softmax
+ assert self.sample_softmax <= 0, (
+ "Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
+ " https://github.com/huggingface/transformers/issues/3310"
+ )
+
+ self.crit = TFAdaptiveSoftmaxMask(
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, name="crit"
+ )
+
+ def _resize_token_embeddings(self, new_num_tokens):
+ raise NotImplementedError()
+
+ def get_output_embeddings(self):
+ """Double-check if you are using adaptive softmax."""
+ if len(self.crit.out_layers) > 0:
+ return self.crit.out_layers[-1]
+ return None
+
+ def reset_memory_length(self, mem_len):
+ self.transformer.reset_memory_length(mem_len)
+
+ def init_mems(self, bsz):
+ return self.transformer.init_mems(bsz)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTransfoXLLMHeadModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ return_dict: bool | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> TFTransfoXLLMHeadModelOutput | Tuple[tf.Tensor]:
+ if input_ids is not None:
+ bsz, tgt_len = shape_list(input_ids)[:2]
+ else:
+ bsz, tgt_len = shape_list(inputs_embeds)[:2]
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ mems,
+ head_mask,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=training,
+ )
+
+ last_hidden = transformer_outputs[0]
+ pred_hid = last_hidden[:, -tgt_len:]
+
+ softmax_output = self.crit(pred_hid, labels, training=training)
+ prediction_scores = softmax_output if labels is None else ()
+
+ if not return_dict:
+ return (prediction_scores,) + transformer_outputs[1:]
+
+ return TFTransfoXLLMHeadModelOutput(
+ prediction_scores=prediction_scores,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
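+ # Incremental decoding sketch (hedged; the checkpoint name follows the usage sketch
+ # above and `input_ids` is assumed to be a `tf.Tensor` of token ids):
+ #
+ #     model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl/transfo-xl-wt103")
+ #     out = model(input_ids)  # first segment, no cache yet
+ #     out = model(input_ids[:, -1:], mems=out.mems)  # next step reuses cached states
+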
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
+ inputs = {}
+
+ # if past is defined in model kwargs then use it for faster decoding: only the last
+ # token needs to be scored, since everything before it is cached in `mems`
+ # (the original body built the sliced `input_ids` but never returned it)
+ if past_key_values:
+ inputs["mems"] = past_key_values
+ inputs["input_ids"] = tf.expand_dims(input_ids[:, -1], axis=-1)
+ else:
+ inputs["input_ids"] = input_ids
+
+ return inputs
+
+ # Adapted from the torch tie_weights function
+ def tf_to_pt_weight_rename(self, tf_weight):
+ if self.config.tie_word_embeddings and "crit.out_layers" in tf_weight:
+ return tf_weight, tf_weight.replace("crit.out_layers", "transformer.word_emb.emb_layers")
+ elif self.config.tie_projs and "crit.out_projs" in tf_weight:
+ for i, tie_proj in enumerate(self.config.tie_projs):
+ if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
+ # self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
+ return tf_weight, tf_weight.replace(f"crit.out_projs.{i}", "transformer.word_emb.emb_projs.0")
+ elif tie_proj and self.config.div_val != 1:
+ # self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
+ return tf_weight, tf_weight.replace("crit.out_projs", "transformer.word_emb.emb_projs")
+ else:
+ return (tf_weight,)
+
+
+@add_start_docstrings(
+ """
+ The Transformer-XL Model transformer with a sequence classification head on top (linear layer).
+
+ [`TFTransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
+ models (e.g. GPT-1, GPT-2) do.
+
+ Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+ self.score = keras.layers.Dense(
+ config.num_labels,
+ kernel_initializer=get_initializer(config.init_range),
+ name="score",
+ use_bias=False,
+ )
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
+
+ def get_output_embeddings(self):
+ # Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too.
+ logger.warning(
+ "Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed "
+ "in transformers v4.32."
+ )
+ return self.transformer.word_emb
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTransfoXLSequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFTransfoXLSequenceClassifierOutputWithPast]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`.
+ """
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+ in_logits = None
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ sequence_lengths = (
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
+ - 1
+ )
+ sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+ loss = None
+
+ if labels is not None:
+ if input_ids is not None:
+ batch_size, sequence_length = shape_list(input_ids)[:2]
+ else:
+ batch_size, sequence_length = shape_list(inputs_embeds)[:2]
+ assert (
+ self.config.pad_token_id is not None or batch_size == 1
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
+
+ if not tf.is_tensor(sequence_lengths):
+ in_logits = logits[0:batch_size, sequence_lengths]
+
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
+
+ pooled_logits = in_logits if in_logits is not None else logits
+
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTransfoXLSequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed1488d5595cb8f36eb540992fb4ca46534a60fb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py
@@ -0,0 +1,179 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ A TF 2.0 adaptive softmax for the Transformer-XL model.
+"""
+
+
+import tensorflow as tf
+
+from ....modeling_tf_utils import keras
+from ....tf_utils import shape_list
+
+
+class TFAdaptiveSoftmaxMask(keras.layers.Layer):
+ def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
+ super().__init__(**kwargs)
+
+ self.vocab_size = vocab_size
+ self.d_embed = d_embed
+ self.d_proj = d_proj
+
+ self.cutoffs = cutoffs + [vocab_size]
+ self.cutoff_ends = [0] + self.cutoffs
+ self.div_val = div_val
+
+ self.shortlist_size = self.cutoffs[0]
+ self.n_clusters = len(self.cutoffs) - 1
+ self.head_size = self.shortlist_size + self.n_clusters
+ self.keep_order = keep_order
+
+ self.out_layers = []
+ self.out_projs = []
+
+ def build(self, input_shape):
+ if self.n_clusters > 0:
+ self.cluster_weight = self.add_weight(
+ shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
+ )
+ self.cluster_bias = self.add_weight(
+ shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
+ )
+
+ if self.div_val == 1:
+ for i in range(len(self.cutoffs)):
+ if self.d_proj != self.d_embed:
+ weight = self.add_weight(
+ shape=(self.d_embed, self.d_proj),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_projs_._{i}",
+ )
+ self.out_projs.append(weight)
+ else:
+ self.out_projs.append(None)
+ weight = self.add_weight(
+ shape=(self.vocab_size, self.d_embed),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._weight",
+ )
+ bias = self.add_weight(
+ shape=(self.vocab_size,),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._bias",
+ )
+ self.out_layers.append((weight, bias))
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = self.d_embed // (self.div_val**i)
+
+ weight = self.add_weight(
+ shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
+ )
+ self.out_projs.append(weight)
+ weight = self.add_weight(
+ shape=(r_idx - l_idx, d_emb_i),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._weight",
+ )
+ bias = self.add_weight(
+ shape=(r_idx - l_idx,),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._bias",
+ )
+ self.out_layers.append((weight, bias))
+ super().build(input_shape)
+
+ @staticmethod
+ def _logit(x, W, b, proj=None):
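+ # Shapes: x is [., ., d_proj] (two leading axes for sequence and batch); proj (if
+ # given) is [d_embed, d_proj], so the first einsum maps x back into the embedding
+ # space; W is [n_tokens, d_embed] and b is [n_tokens], so the result is per-token
+ # logits with the same two leading axes.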
+ y = x
+ if proj is not None:
+ y = tf.einsum("ibd,ed->ibe", y, proj)
+ return tf.einsum("ibd,nd->ibn", y, W) + b
+
+ @staticmethod
+ def _gather_logprob(logprob, target):
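+ # Select logprob[r, target[r]] for each row r: the log-probability the model assigns
+ # to the gold token within the current cluster.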
+ lp_size = shape_list(logprob)
+ r = tf.range(lp_size[0], dtype=target.dtype)
+ idx = tf.stack([r, target], 1)
+ return tf.gather_nd(logprob, idx)
+
+ def call(self, hidden, target, return_mean=True, training=False):
+ head_logprob = 0
+ if self.n_clusters == 0:
+ output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
+ if target is not None:
+ loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
+ out = tf.nn.log_softmax(output, axis=-1)
+ else:
+ hidden_sizes = shape_list(hidden)
+ out = []
+ loss = tf.zeros(hidden_sizes[:2])
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ if target is not None:
+ mask = (target >= l_idx) & (target < r_idx)
+ mask_idx = tf.where(mask)
+ cur_target = tf.boolean_mask(target, mask) - l_idx
+
+ if self.div_val == 1:
+ cur_W = self.out_layers[0][0][l_idx:r_idx]
+ cur_b = self.out_layers[0][1][l_idx:r_idx]
+ else:
+ cur_W = self.out_layers[i][0]
+ cur_b = self.out_layers[i][1]
+
+ if i == 0:
+ cur_W = tf.concat([cur_W, self.cluster_weight], 0)
+ cur_b = tf.concat([cur_b, self.cluster_bias], 0)
+
+ head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
+ head_logprob = tf.nn.log_softmax(head_logit)
+ out.append(head_logprob[..., : self.cutoffs[0]])
+ if target is not None:
+ cur_head_logprob = tf.boolean_mask(head_logprob, mask)
+ cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
+ else:
+ tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
+ tail_logprob = tf.nn.log_softmax(tail_logit)
+ cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
+ logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
+ out.append(logprob_i)
+ if target is not None:
+ cur_head_logprob = tf.boolean_mask(head_logprob, mask)
+ cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
+ cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
+ cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
+ if target is not None:
+ loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
+ out = tf.concat(out, axis=-1)
+
+ if target is not None:
+ if return_mean:
+ loss = tf.reduce_mean(loss)
+ # Add the training-time loss value to the layer using `self.add_loss()`.
+ self.add_loss(loss)
+
+ # Log the loss as a metric (we could log arbitrary metrics,
+ # including different metrics for training and inference).
+ self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
+
+ return out
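+
+
+# A hedged sketch of the cluster factorization this layer implements (illustrative
+# numbers, not tied to any particular checkpoint): with `cutoffs=[20000, 40000, 200000]`
+# and `vocab_size=267735`, the head softmax covers the 20000 most frequent tokens plus
+# one "cluster token" per tail cluster; a token in tail cluster i is then scored as
+# log P(cluster_i | hidden) + log P(token | cluster_i, hidden), which is the
+# `head_logprob[..., cluster_prob_idx, None] + tail_logprob` term in `call` above.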
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b8f222f508a35f0c19c6120ee9c355794e392a0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
@@ -0,0 +1,1297 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ PyTorch Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. In particular
+ https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
+"""
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ....modeling_utils import PreTrainedModel
+from ....utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_transfo_xl import TransfoXLConfig
+from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
+_CONFIG_FOR_DOC = "TransfoXLConfig"
+
+TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "transfo-xl/transfo-xl-wt103",
+ # See all Transformer XL models at https://huggingface.co/models?filter=transfo-xl
+]
+
+
+def build_tf_to_pytorch_map(model, config):
+ """
+ A map of modules from TF to PyTorch. A map is used here to keep the PyTorch model as close as possible to the
+ original PyTorch implementation.
+ """
+ tf_to_pt_map = {}
+
+ if hasattr(model, "transformer"):
+ # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
+ tf_to_pt_map.update(
+ {
+ "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
+ "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
+ }
+ )
+ for i, (out_l, proj_l, tie_proj) in enumerate(
+ zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
+ ):
+ layer_str = f"transformer/adaptive_softmax/cutoff_{i}/"
+ if config.tie_word_embeddings:
+ tf_to_pt_map.update({layer_str + "b": out_l.bias})
+ else:
+ # Untied output embeddings don't appear to be implemented in the TF code;
+ # the mapping would otherwise be:
+ # tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
+ raise NotImplementedError
+ if not tie_proj:
+ tf_to_pt_map.update({layer_str + "proj": proj_l})
+ # Now load the rest of the transformer
+ model = model.transformer
+
+ # Embeddings
+ for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
+ layer_str = f"transformer/adaptive_embed/cutoff_{i}/"
+ tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})
+
+ # Transformer blocks
+ for i, b in enumerate(model.layers):
+ layer_str = f"transformer/layer_{i}/"
+ tf_to_pt_map.update(
+ {
+ layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
+ layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
+ layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
+ layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
+ layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
+ layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
+ layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
+ layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
+ layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
+ layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
+ layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
+ }
+ )
+
+ # Relative positioning biases
+ if config.untie_r:
+ r_r_list = []
+ r_w_list = []
+ for b in model.layers:
+ r_r_list.append(b.dec_attn.r_r_bias)
+ r_w_list.append(b.dec_attn.r_w_bias)
+ else:
+ r_r_list = [model.r_r_bias]
+ r_w_list = [model.r_w_bias]
+ tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
+ return tf_to_pt_map
+
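+# Examples of keys produced by the mapping above (the attributes live on the base
+# transformer after `model = model.transformer`):
+#   "transformer/adaptive_embed/cutoff_0/lookup_table" -> word_emb.emb_layers[0].weight
+#   "transformer/layer_0/rel_attn/qkv/kernel"          -> layers[0].dec_attn.qkv_net.weight
+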
+
+def load_tf_weights_in_transfo_xl(model, config, tf_path):
+ """Load tf checkpoints in a pytorch model"""
+ try:
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ # Build TF to PyTorch weights loading map
+ tf_to_pt_map = build_tf_to_pytorch_map(model, config)
+
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ tf_weights = {}
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ tf_weights[name] = array
+
+ for name, pointer in tf_to_pt_map.items():
+ assert name in tf_weights
+ array = tf_weights[name]
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+ # which are not required for using the pretrained model
+ if "kernel" in name or "proj" in name:
+ array = np.transpose(array)
+ if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
+ # Here we will split the TF weights
+ assert len(pointer) == array.shape[0]
+ for i, p_i in enumerate(pointer):
+ arr_i = array[i, ...]
+ try:
+ assert p_i.shape == arr_i.shape
+ except AssertionError as e:
+ e.args += (p_i.shape, arr_i.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name} for layer {i}")
+ p_i.data = torch.from_numpy(arr_i)
+ else:
+ try:
+ assert (
+ pointer.shape == array.shape
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
+ except AssertionError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ tf_weights.pop(name, None)
+ tf_weights.pop(name + "/Adam", None)
+ tf_weights.pop(name + "/Adam_1", None)
+
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
+ return model
+
+
+class PositionalEmbedding(nn.Module):
+ def __init__(self, demb):
+ super().__init__()
+
+ self.demb = demb
+
+ inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
+ self.register_buffer("inv_freq", inv_freq)
+
+ def forward(self, pos_seq, bsz=None):
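+ # pos_seq: [L]; inv_freq: [demb / 2]; their outer product is [L, demb / 2] and the
+ # sin/cos concatenation gives [L, demb] (or [L, bsz, demb] when expanded).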
+ sinusoid_inp = torch.outer(pos_seq, self.inv_freq)
+ pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
+
+ if bsz is not None:
+ return pos_emb[:, None, :].expand(-1, bsz, -1)
+ else:
+ return pos_emb[:, None, :]
+
+
+class PositionwiseFF(nn.Module):
+ def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
+ super().__init__()
+
+ self.d_model = d_model
+ self.d_inner = d_inner
+ self.dropout = dropout
+
+ self.CoreNet = nn.Sequential(
+ nn.Linear(d_model, d_inner),
+ nn.ReLU(inplace=True),
+ nn.Dropout(dropout),
+ nn.Linear(d_inner, d_model),
+ nn.Dropout(dropout),
+ )
+
+ self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
+
+ self.pre_lnorm = pre_lnorm
+
+ def forward(self, inp):
+ if self.pre_lnorm:
+ # layer normalization + positionwise feed-forward
+ core_out = self.CoreNet(self.layer_norm(inp))
+
+ # residual connection
+ output = core_out + inp
+ else:
+ # positionwise feed-forward
+ core_out = self.CoreNet(inp)
+
+ # residual connection + layer normalization
+ output = self.layer_norm(inp + core_out)
+
+ return output
+
+
+class RelPartialLearnableMultiHeadAttn(nn.Module):
+ def __init__(
+ self,
+ n_head,
+ d_model,
+ d_head,
+ dropout,
+ dropatt=0,
+ pre_lnorm=False,
+ r_r_bias=None,
+ r_w_bias=None,
+ layer_norm_epsilon=1e-5,
+ ):
+ super().__init__()
+
+ self.n_head = n_head
+ self.d_model = d_model
+ self.d_head = d_head
+ self.dropout = dropout
+
+ self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
+
+ self.drop = nn.Dropout(dropout)
+ self.dropatt = nn.Dropout(dropatt)
+ self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
+
+ self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
+
+ self.scale = 1 / (d_head**0.5)
+
+ self.pre_lnorm = pre_lnorm
+
+ if r_r_bias is None or r_w_bias is None: # Biases are not shared
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ else:
+ self.r_r_bias = r_r_bias
+ self.r_w_bias = r_w_bias
+
+ self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
+
+ def _rel_shift(self, x):
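+ # Relative-shift trick: `x` holds scores indexed by (query position, relative
+ # position). Prepending a zero column along dim 1 and viewing the result with the
+ # first two dims swapped rotates each query row by one extra step, so that after
+ # dropping the first row every entry lines up with its absolute key position,
+ # without an explicit gather.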
+ zero_pad_shape = (x.size(0), 1) + x.size()[2:]
+ zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
+ x_padded = torch.cat([zero_pad, x], dim=1)
+
+ x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
+ x_padded = x_padded.view(*x_padded_shape)
+
+ x = x_padded[1:].view_as(x)
+
+ return x
+
+ def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False):
+ qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
+
+ if mems is not None:
+ cat = torch.cat([mems, w], 0)
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(cat))
+ else:
+ w_heads = self.qkv_net(cat)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
+ w_head_q = w_head_q[-qlen:]
+ else:
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(w))
+ else:
+ w_heads = self.qkv_net(w)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
+
+ klen = w_head_k.size(0)
+
+ w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
+ w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
+ w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
+
+ r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # rlen x n_head x d_head
+
+ # compute attention score
+ rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
+ AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
+
+ rr_head_q = w_head_q + self.r_r_bias
+ BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
+ BD = self._rel_shift(BD)
+
+ # [qlen x klen x bsz x n_head]
+ attn_score = AC + BD
+ attn_score.mul_(self.scale)
+
+ mask_value = torch.finfo(attn_score.dtype).min
+
+ # compute attention probability
+ if attn_mask is not None and torch.sum(attn_mask).item():
+ attn_mask = attn_mask == 1 # Switch to bool
+ if attn_mask.dim() == 2:
+ attn_score = (
+ attn_score.float().masked_fill(attn_mask[None, :, :, None], mask_value).type_as(attn_score)
+ )
+ elif attn_mask.dim() == 3:
+ attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], mask_value).type_as(attn_score)
+
+ # [qlen x klen x bsz x n_head]
+ attn_prob = nn.functional.softmax(attn_score, dim=1)
+ attn_prob = self.dropatt(attn_prob)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_prob = attn_prob * head_mask
+
+ # compute attention vector
+ attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
+
+ # [qlen x bsz x n_head x d_head]
+ attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
+
+ # linear projection
+ attn_out = self.o_net(attn_vec)
+ attn_out = self.drop(attn_out)
+
+ if self.pre_lnorm:
+ # residual connection
+ outputs = [w + attn_out]
+ else:
+ # residual connection + layer normalization
+ outputs = [self.layer_norm(w + attn_out)]
+
+ if output_attentions:
+ outputs.append(attn_prob)
+
+ return outputs
+
+
+class RelPartialLearnableDecoderLayer(nn.Module):
+ def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):
+ super().__init__()
+
+ self.dec_attn = RelPartialLearnableMultiHeadAttn(
+ n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs
+ )
+ self.pos_ff = PositionwiseFF(
+ d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon
+ )
+
+ def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False):
+ attn_outputs = self.dec_attn(
+ dec_inp,
+ r,
+ attn_mask=dec_attn_mask,
+ mems=mems,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ ff_output = self.pos_ff(attn_outputs[0])
+
+ outputs = [ff_output] + attn_outputs[1:]
+
+ return outputs
+
+
+class AdaptiveEmbedding(nn.Module):
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):
+ super().__init__()
+
+ self.n_token = n_token
+ self.d_embed = d_embed
+
+ self.cutoffs = cutoffs + [n_token]
+ self.div_val = div_val
+ self.d_proj = d_proj
+
+ self.emb_scale = d_proj**0.5
+
+ self.cutoff_ends = [0] + self.cutoffs
+
+ self.emb_layers = nn.ModuleList()
+ self.emb_projs = nn.ParameterList()
+ if div_val == 1:
+ self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
+ if d_proj != d_embed:
+ self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = d_embed // (div_val**i)
+ self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
+ self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
+
+ def forward(self, inp):
+ if self.div_val == 1:
+ embed = self.emb_layers[0](inp)
+ if self.d_proj != self.d_embed:
+ embed = nn.functional.linear(embed, self.emb_projs[0])
+ else:
+ param = next(self.parameters())
+ inp_flat = inp.view(-1)
+ emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+
+ mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
+ indices_i = mask_i.nonzero().squeeze()
+
+ if indices_i.numel() == 0:
+ continue
+
+ inp_i = inp_flat.index_select(0, indices_i) - l_idx
+ emb_i = self.emb_layers[i](inp_i)
+ emb_i = nn.functional.linear(emb_i, self.emb_projs[i])
+
+ emb_flat.index_copy_(0, indices_i, emb_i)
+
+ embed_shape = inp.size() + (self.d_proj,)
+ embed = emb_flat.view(embed_shape)
+
+ embed.mul_(self.emb_scale)
+
+ return embed
+
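+# A hedged sketch of the adaptive layout (illustrative numbers, not taken from any
+# shipped config): with `cutoffs=[20000, 40000, 200000]`, `n_token=267735`,
+# `d_embed=1024` and `div_val=4`, the four clusters embed tokens [0, 20000),
+# [20000, 40000), [40000, 200000) and [200000, 267735) with dims 1024, 256, 64 and 16,
+# each projected back up to `d_proj` before the `emb_scale` multiplication.
+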
+
+class TransfoXLPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TransfoXLConfig
+ load_tf_weights = load_tf_weights_in_transfo_xl
+ base_model_prefix = "transformer"
+
+ def _init_weight(self, weight):
+ if self.config.init == "uniform":
+ nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
+ elif self.config.init == "normal":
+ nn.init.normal_(weight, 0.0, self.config.init_std)
+
+ def _init_bias(self, bias):
+ nn.init.constant_(bias, 0.0)
+
+ def _init_weights(self, m):
+ """Initialize the weights."""
+ classname = m.__class__.__name__
+ if classname.find("Linear") != -1:
+ if hasattr(m, "weight") and m.weight is not None:
+ self._init_weight(m.weight)
+ if hasattr(m, "bias") and m.bias is not None:
+ self._init_bias(m.bias)
+ elif classname.find("AdaptiveEmbedding") != -1:
+ if hasattr(m, "emb_projs"):
+ for i in range(len(m.emb_projs)):
+ if m.emb_projs[i] is not None:
+ nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
+ elif classname.find("Embedding") != -1:
+ if hasattr(m, "weight"):
+ self._init_weight(m.weight)
+ elif classname.find("ProjectedAdaptiveLogSoftmax") != -1:
+ if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
+ self._init_weight(m.cluster_weight)
+ if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
+ self._init_bias(m.cluster_bias)
+ if hasattr(m, "out_projs"):
+ for i in range(len(m.out_projs)):
+ if m.out_projs[i] is not None:
+ nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
+ elif classname.find("LayerNorm") != -1:
+ if hasattr(m, "weight"):
+ nn.init.normal_(m.weight, 1.0, self.config.init_std)
+ if hasattr(m, "bias") and m.bias is not None:
+ self._init_bias(m.bias)
+ else:
+ if hasattr(m, "r_emb"):
+ self._init_weight(m.r_emb)
+ if hasattr(m, "r_w_bias"):
+ self._init_weight(m.r_w_bias)
+ if hasattr(m, "r_r_bias"):
+ self._init_weight(m.r_r_bias)
+ if hasattr(m, "r_bias"):
+ self._init_bias(m.r_bias)
+
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1):
+ """
+ Resize the input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. Takes care of
+ tying the weight embeddings afterwards if the model class has a *tie_weights()* method.
+
+ Arguments:
+ new_num_tokens: (*optional*) int:
+ New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at
+ the end. Reducing the size will remove vectors from the end. If not provided or None, does nothing and
+ just returns a pointer to the input tokens `torch.nn.Embedding` module of the model.
+ layer: (*optional*) int:
+ Layer of the *AdaptiveEmbedding* where the resizing should be done. By default the last layer will be
+ resized. Be aware that when resizing a layer other than the last, you have to ensure that the new
+ token(s) in the tokenizer are at the corresponding position.
+
+ Return: `torch.nn.Embedding` pointer to the input tokens embedding module of the model
+ """
+ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
+
+ if new_num_tokens is None:
+ return self.get_input_embeddings()
+
+ new_num_tokens_layer, layer = self._get_new_num_tokens_layer(new_num_tokens, layer)
+ assert new_num_tokens_layer > 0, "The size of the new embedding layer cannot be 0 or less"
+ model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer)
+
+ # Update base model and current model config
+ self.config.vocab_size = new_num_tokens
+ base_model.vocab_size = new_num_tokens
+ base_model.n_token = new_num_tokens
+
+ new_embedding_shapes = self._get_embedding_shapes()
+ self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer)
+
+ # Tie weights again if needed
+ self.tie_weights()
+
+ return model_embeds
+
+ def _get_new_num_tokens_layer(self, new_num_tokens, layer):
+ embeddings = self.get_input_embeddings()
+ if layer == -1:
+ layer = len(embeddings.emb_layers) - 1
+ assert 0 <= layer <= len(embeddings.emb_layers) - 1
+
+ new_num_tokens_layer = (
+ new_num_tokens
+ - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]])
+ - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]])
+ )
+ return new_num_tokens_layer, layer
+
+ def _get_embedding_shapes(self):
+ embeddings = self.get_input_embeddings()
+ return [emb.weight.shape[0] for emb in embeddings.emb_layers]
+
+ def _resize_token_embeddings(self, new_num_tokens, layer=-1):
+ embeddings = self.get_input_embeddings()
+ if new_num_tokens is None:
+ return embeddings
+ new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens)
+ embeddings.emb_layers[layer] = new_embeddings_layer
+
+ self.set_input_embeddings(embeddings)
+
+ return self.get_input_embeddings()
+
+ def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
+ embeddings = self.get_input_embeddings()
+
+ for i in range(layer, len(embeddings.cutoffs)):
+ embeddings.cutoffs[i] = sum(new_embedding_shapes[: i + 1])
+
+ embeddings.cutoff_ends = [0] + embeddings.cutoffs
+ embeddings.n_token = new_num_tokens
+
+ self.config.cutoffs = embeddings.cutoffs[:-1]
+
+ return embeddings.cutoffs
+
+
+@dataclass
+class TransfoXLModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ mems: List[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class TransfoXLSequenceClassifierOutputWithPast(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ mems: List[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class TransfoXLLMHeadModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ losses (`torch.FloatTensor` of shape `(batch_size, sequence_length - 1)`, *optional*, returned when `labels` is provided):
+ Language modeling losses (not reduced).
+ prediction_scores (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ loss (`torch.FloatTensor` of shape `()`, *optional*, returned when `labels` is provided):
+ Reduced language modeling loss.
+ """
+
+ losses: Optional[torch.FloatTensor] = None
+ prediction_scores: torch.FloatTensor = None
+ mems: List[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ loss: Optional[torch.FloatTensor] = None
+
+ @property
+ def logits(self):
+ # prediction scores are the output of the adaptive softmax, see
+ # the file `modeling_transfo_xl_utilities`. Since the adaptive
+ # softmax returns the log softmax value, `self.prediction_scores`
+ # are strictly speaking not exactly `logits`, but behave the same
+ # way logits do.
+ return self.prediction_scores
+
+
+TRANSFO_XL_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TRANSFO_XL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
+ given to this model should not be passed as `input_ids` as they have already been computed.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TransfoXLModel(TransfoXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.n_token = config.vocab_size
+
+ self.d_embed = config.d_embed
+ self.d_model = config.d_model
+ self.n_head = config.n_head
+ self.d_head = config.d_head
+
+ self.word_emb = AdaptiveEmbedding(
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
+ )
+
+ self.drop = nn.Dropout(config.dropout)
+
+ self.n_layer = config.n_layer
+ self.mem_len = config.mem_len
+ self.attn_type = config.attn_type
+
+ if not config.untie_r:
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+
+ self.layers = nn.ModuleList()
+ if config.attn_type == 0: # the default attention
+ for i in range(config.n_layer):
+ self.layers.append(
+ RelPartialLearnableDecoderLayer(
+ config.n_head,
+ config.d_model,
+ config.d_head,
+ config.d_inner,
+ config.dropout,
+ dropatt=config.dropatt,
+ pre_lnorm=config.pre_lnorm,
+ r_w_bias=None if config.untie_r else self.r_w_bias,
+ r_r_bias=None if config.untie_r else self.r_r_bias,
+ layer_norm_epsilon=config.layer_norm_epsilon,
+ )
+ )
+ else: # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
+ raise NotImplementedError # Removed them to avoid maintaining dead code
+
+ self.same_length = config.same_length
+ self.clamp_len = config.clamp_len
+
+ if self.attn_type == 0: # default attention
+ self.pos_emb = PositionalEmbedding(self.d_model)
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_emb
+
+ def set_input_embeddings(self, new_embeddings):
+ self.word_emb = new_embeddings
+
+ def backward_compatible(self):
+ self.sample_softmax = -1
+
+ def reset_memory_length(self, mem_len):
+ self.mem_len = mem_len
+
+ def _prune_heads(self, heads):
+ logger.info("Head pruning is not implemented for the Transformer-XL model")
+
+ def init_mems(self, bsz):
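+ # Warm-start the cache with one zero tensor of shape [mem_len, bsz, d_model] per
+ # layer; `_update_mems` rolls these forward after every segment.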
+ if self.mem_len > 0:
+ mems = []
+ param = next(self.parameters())
+ for i in range(self.n_layer):
+ empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)
+ mems.append(empty)
+
+ return mems
+ else:
+ return None
+
+ def _update_mems(self, hids, mems, mlen, qlen):
+ # does not deal with None
+ if mems is None:
+ return None
+
+ # mems is not None
+ assert len(hids) == len(mems), "len(hids) != len(mems)"
+
+ # There are `mlen + qlen` steps that can be cached into mems
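+ # e.g. with mem_len=1600, mlen=1600 cached steps and qlen=128 new steps, the slice
+ # kept below is [128:1728), i.e. the most recent 1600 hidden states per layer.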
+ with torch.no_grad():
+ new_mems = []
+ end_idx = mlen + max(0, qlen)
+ beg_idx = max(0, end_idx - self.mem_len)
+ for i in range(len(hids)):
+ cat = torch.cat([mems[i], hids[i]], dim=0)
+ new_mems.append(cat[beg_idx:end_idx].detach())
+
+ return new_mems
+
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TransfoXLModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ mems: Optional[List[torch.FloatTensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TransfoXLModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
+ # so we transpose here from shape [bsz, len] to shape [len, bsz]
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_ids = input_ids.transpose(0, 1).contiguous()
+ qlen, bsz = input_ids.size()
+ elif inputs_embeds is not None:
+ inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
+ qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if mems is None:
+ mems = self.init_mems(bsz)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
+ if head_mask is not None:
+ if head_mask.dim() == 1:
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
+ head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
+ elif head_mask.dim() == 2:
+ head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
+ head_mask = head_mask.to(
+ dtype=next(self.parameters()).dtype
+ ) # switch to float if need + fp16 compatibility
+ else:
+ head_mask = [None] * self.n_layer
+
+ if inputs_embeds is not None:
+ word_emb = inputs_embeds
+ else:
+ word_emb = self.word_emb(input_ids)
+
+ mlen = mems[0].size(0) if mems is not None else 0
+ klen = mlen + qlen
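+ # Causal mask over [qlen, klen]: token i may attend to every `mlen` cached position
+ # and to positions <= i of the current segment; with `same_length`, an extra
+ # lower-triangular band is masked so each token sees the same number of positions.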
+ if self.same_length:
+ all_ones = word_emb.new_ones((qlen, klen), dtype=torch.bool)
+ mask_len = klen - self.mem_len
+ if mask_len > 0:
+ mask_shift_len = qlen - mask_len
+ else:
+ mask_shift_len = qlen
+ dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1
+ else:
+ dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.bool), diagonal=1 + mlen)[
+ :, :, None
+ ]
+
+ hids = []
+ attentions = [] if output_attentions else None
+ if self.attn_type == 0: # default
+ pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=torch.int64).to(
+ word_emb.dtype
+ )
+ if self.clamp_len > 0:
+ pos_seq.clamp_(max=self.clamp_len)
+ pos_emb = self.pos_emb(pos_seq)
+
+ core_out = self.drop(word_emb)
+ pos_emb = self.drop(pos_emb)
+
+ for i, layer in enumerate(self.layers):
+ hids.append(core_out)
+ mems_i = None if mems is None else mems[i]
+ layer_outputs = layer(
+ core_out,
+ pos_emb,
+ dec_attn_mask=dec_attn_mask,
+ mems=mems_i,
+ head_mask=head_mask[i],
+ output_attentions=output_attentions,
+ )
+ core_out = layer_outputs[0]
+ if output_attentions:
+ attentions.append(layer_outputs[1])
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ core_out = self.drop(core_out)
+
+ new_mems = self._update_mems(hids, mems, mlen, qlen)
+
+ if output_hidden_states:
+ # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
+ hids.append(core_out)
+ hids = tuple(t.transpose(0, 1).contiguous() for t in hids)
+ else:
+ hids = None
+ if output_attentions:
+ # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
+ attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
+ # We transpose back here to shape [bsz, len, hidden_dim]
+ core_out = core_out.transpose(0, 1).contiguous()
+
+ if not return_dict:
+ return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)
+
+ return TransfoXLModelOutput(
+ last_hidden_state=core_out,
+ mems=new_mems,
+ hidden_states=hids,
+ attentions=attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
+ input embeddings)
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
+ _tied_weights_keys = [r"crit\.out_projs\.\d+", r"crit\.out_layers\.\d+\.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = TransfoXLModel(config)
+ self.sample_softmax = config.sample_softmax
+ self.trainer_compatible = getattr(config, "trainer_compatible", False)
+
+ if not self.trainer_compatible:
+ warnings.warn(
+ "The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order "
+ "to use that updated output, please specify `trainer_compatible=True` as your configuration"
+ " attribute.",
+ DeprecationWarning,
+ )
+
+ assert self.sample_softmax <= 0, (
+ "Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
+ " https://github.com/huggingface/transformers/issues/3310"
+ )
+
+ self.crit = ProjectedAdaptiveLogSoftmax(
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def tie_weights(self):
+ """
+ Run this to be sure output and input (adaptive) softmax weights are tied
+ """
+
+ if self.config.tie_word_embeddings:
+ for i in range(len(self.crit.out_layers)):
+ self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])
+ if self.config.tie_projs:
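+ # TorchScript cannot alias the same nn.Parameter in two modules, so when
+ # `torchscript` is set the projections are cloned rather than shared.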
+ for i, tie_proj in enumerate(self.config.tie_projs):
+ if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
+ if self.config.torchscript:
+ self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
+ else:
+ self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
+ elif tie_proj and self.config.div_val != 1:
+ if self.config.torchscript:
+ self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
+ else:
+ self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
+
+ def reset_memory_length(self, mem_len):
+ self.transformer.reset_memory_length(mem_len)
+
+ def init_mems(self, bsz):
+ return self.transformer.init_mems(bsz)
+
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TransfoXLLMHeadModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ mems: Optional[List[torch.FloatTensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TransfoXLLMHeadModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if input_ids is not None:
+ bsz, tgt_len = input_ids.size(0), input_ids.size(1)
+ elif inputs_embeds is not None:
+ bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden = transformer_outputs[0]
+ pred_hid = last_hidden[:, -tgt_len:]
+
+ if labels is not None:
+ # Prevents all labels from being -100 and throwing an error
+ # when backpropagating the loss
+ miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100
+ if miss_valid_label:
+ # Sets an <EOS> token, just to prevent the loss from being NaN
+ labels[0, 1] = self.config.eos_token_id
+
+ softmax_output = self.crit(pred_hid, labels)
+ prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else ()
+
+ if labels is not None:
+ losses = softmax_output.view(bsz, tgt_len - 1)
+ # Avoid incorporating padding (-100) tokens into the loss value
+ loss = losses[losses != 0].mean()
+ else:
+ losses, loss = None, None
+
+ if not return_dict:
+ if self.trainer_compatible:
+ output = (prediction_scores, losses) if losses is not None else (prediction_scores,)
+ output += transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+ else:
+ output = (prediction_scores, *transformer_outputs[1:])
+ output = ((losses,) + output) if losses is not None else output
+ return (output + (loss,)) if loss is not None else output
+
+ return TransfoXLLMHeadModelOutput(
+ loss=loss,
+ prediction_scores=prediction_scores,
+ losses=losses,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def get_output_embeddings(self):
+ """Double-check if you are using adaptive softmax."""
+ if self.sample_softmax > 0:
+ return self.out_layer
+ else:
+ return self.crit.out_layers[-1]
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
+ inputs = {}
+
+ # if past is defined in model kwargs then use it for faster decoding
+ if past_key_values:
+ inputs["mems"] = past_key_values
+ inputs["input_ids"] = input_ids[:, -1].unsqueeze(-1)
+ else:
+ inputs["input_ids"] = input_ids
+
+ return inputs
+
+ def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
+ new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer)
+
+ self.crit.cutoffs = new_cutoffs
+ self.crit.cutoff_ends = [0] + new_cutoffs
+ self.crit.n_token = new_num_tokens
+
+ @staticmethod
+ def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:
+ """
+ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
+ generation step.
+ """
+ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]
+
+
+@add_start_docstrings(
+ """
+ The Transformer-XL Model transformer with a sequence classification head on top (linear layer).
+
+ [`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
+ models (e.g. GPT-1) do.
+
+ Since it does classification on the last token, it requires to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.transformer = TransfoXLModel(config)
+ self.score = nn.Linear(config.d_embed, self.num_labels, bias=False)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TransfoXLSequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ mems: Optional[List[torch.FloatTensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size, sequence_length = input_ids.shape[:2]
+ else:
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ assert (
+ self.config.pad_token_id is not None or batch_size == 1
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+
+ pooled_logits = logits[range(batch_size), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TransfoXLSequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
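A minimal usage sketch for the memory mechanism implemented above. This is illustrative only: it assumes a transformers version that still ships these deprecated classes, `sacremoses` installed, and opting into the tokenizer's pickle loading via `TRUST_REMOTE_CODE` (see the security guard in the tokenizer file below); the chunk length is arbitrary.

```python
import os

import torch

# Required by TransfoXLTokenizer's pickle guard; only do this for trusted files.
os.environ["TRUST_REMOTE_CODE"] = "True"

from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
model = TransfoXLLMHeadModel.from_pretrained("transfo-xl/transfo-xl-wt103")
model.eval()

ids = tokenizer("the quick brown fox jumps over the lazy dog", return_tensors="pt")["input_ids"]

# Feed the sequence chunk by chunk; `mems` carries the cached hidden states of
# earlier chunks so later chunks can attend to them without recomputation.
mems = None
with torch.no_grad():
    for chunk in ids.split(4, dim=1):
        outputs = model(input_ids=chunk, mems=mems)
        mems = outputs.mems  # one [mem_len, bsz, d_model] tensor per layer
```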
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..addf2a08372bc00a377ab7410d977c31fb1d48eb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py
@@ -0,0 +1,252 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Utilities for PyTorch Transformer XL model. Directly adapted from https://github.com/kimiyoung/transformer-xl.
+"""
+
+
+import torch
+from torch import nn
+
+
+# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
+# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
+
+
+class ProjectedAdaptiveLogSoftmax(nn.Module):
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
+ super().__init__()
+
+ self.n_token = n_token
+ self.d_embed = d_embed
+ self.d_proj = d_proj
+
+ self.cutoffs = cutoffs + [n_token]
+ self.cutoff_ends = [0] + self.cutoffs
+ self.div_val = div_val
+
+ self.shortlist_size = self.cutoffs[0]
+ self.n_clusters = len(self.cutoffs) - 1
+ self.head_size = self.shortlist_size + self.n_clusters
+
+ if self.n_clusters > 0:
+ self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
+ self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
+
+ self.out_layers = nn.ModuleList()
+ self.out_projs = nn.ParameterList()
+
+ if div_val == 1:
+ for i in range(len(self.cutoffs)):
+ if d_proj != d_embed:
+ self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
+ else:
+ self.out_projs.append(None)
+
+ self.out_layers.append(nn.Linear(d_embed, n_token))
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = d_embed // (div_val**i)
+
+ self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
+
+ self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
+
+ self.keep_order = keep_order
+
+ def _compute_logit(self, hidden, weight, bias, proj):
+ if proj is None:
+ logit = nn.functional.linear(hidden, weight, bias=bias)
+ else:
+ # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
+ proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
+ logit = nn.functional.linear(proj_hid, weight, bias=bias)
+ # else:
+ # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
+ # if bias is not None:
+ # logit = logit + bias
+
+ return logit
+
+ def forward(self, hidden, labels=None, keep_order=False):
+ """
+ Params:
+ hidden :: [len*bsz x d_proj]
+ labels :: [len*bsz]
+
+ Return:
+ if labels is None: out :: [len*bsz x n_token] log probabilities of tokens over the vocabulary
+ else: out :: [(len-1)*bsz] negative log likelihood
+
+ We could replace this implementation with the native PyTorch one if the native one had an option to set
+ the bias on all clusters. See:
+ https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
+ """
+
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ hidden = hidden[..., :-1, :].contiguous()
+ labels = labels[..., 1:].contiguous()
+ hidden = hidden.view(-1, hidden.size(-1))
+ labels = labels.view(-1)
+ if hidden.size(0) != labels.size(0):
+ raise RuntimeError("Input and labels should have the same size in the batch dimension.")
+ else:
+ hidden = hidden.view(-1, hidden.size(-1))
+
+ if self.n_clusters == 0:
+ logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
+ if labels is not None:
+ mask = labels != -100
+ out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
+ out[mask] = (
+ -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
+ )
+ else:
+ out = nn.functional.log_softmax(logit, dim=-1)
+ else:
+ # construct weights and biases
+ weights, biases = [], []
+ for i in range(len(self.cutoffs)):
+ if self.div_val == 1:
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ weight_i = self.out_layers[0].weight[l_idx:r_idx]
+ bias_i = self.out_layers[0].bias[l_idx:r_idx]
+ else:
+ weight_i = self.out_layers[i].weight
+ bias_i = self.out_layers[i].bias
+
+ if i == 0:
+ weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
+ bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
+
+ weights.append(weight_i)
+ biases.append(bias_i)
+
+ head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
+
+ head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
+ head_logprob = nn.functional.log_softmax(head_logit, dim=1)
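+ # `head_logprob` covers the shortlist tokens plus one "routing" entry per
+ # tail cluster (the cluster_weight/cluster_bias rows appended above)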
+
+ if labels is None:
+ out = hidden.new_empty((head_logit.size(0), self.n_token))
+ else:
+ out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
+
+ offset = 0
+ cutoff_values = [0] + self.cutoffs
+ for i in range(len(cutoff_values) - 1):
+ l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
+
+ if labels is not None:
+ mask_i = (labels >= l_idx) & (labels < r_idx)
+ indices_i = mask_i.nonzero().squeeze()
+
+ if indices_i.numel() == 0:
+ continue
+
+ target_i = labels.index_select(0, indices_i) - l_idx
+ head_logprob_i = head_logprob.index_select(0, indices_i)
+ hidden_i = hidden.index_select(0, indices_i)
+ else:
+ hidden_i = hidden
+
+ if i == 0:
+ if labels is not None:
+ logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
+ else:
+ out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
+ else:
+ weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
+
+ tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
+ tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
+ cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
+ if labels is not None:
+ logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
+ 1, target_i[:, None]
+ ).squeeze(1)
+ else:
+ logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
+ out[:, l_idx:r_idx] = logprob_i
+
+ if labels is not None:
+ if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
+ out.index_copy_(0, indices_i, -logprob_i)
+ else:
+ out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
+ offset += logprob_i.size(0)
+
+ return out
+
+ def log_prob(self, hidden):
+ r"""
+ Computes log probabilities for all \\(n\_classes\\). From:
+ https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
+
+ Args:
+ hidden (Tensor): a minibatch of examples
+
+ Returns:
+ log-probabilities for each class \\(c\\) in range \\(0 <= c <= n\_classes\\), where \\(n\_classes\\) is
+ a parameter passed to `AdaptiveLogSoftmaxWithLoss` constructor. Shape:
+
+ - Input: \\((N, in\_features)\\)
+ - Output: \\((N, n\_classes)\\)
+ """
+ if self.n_clusters == 0:
+ logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
+ return nn.functional.log_softmax(logit, dim=-1)
+ else:
+ # construct weights and biases
+ weights, biases = [], []
+ for i in range(len(self.cutoffs)):
+ if self.div_val == 1:
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ weight_i = self.out_layers[0].weight[l_idx:r_idx]
+ bias_i = self.out_layers[0].bias[l_idx:r_idx]
+ else:
+ weight_i = self.out_layers[i].weight
+ bias_i = self.out_layers[i].bias
+
+ if i == 0:
+ weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
+ bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
+
+ weights.append(weight_i)
+ biases.append(bias_i)
+
+ head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
+ head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
+
+ out = hidden.new_empty((head_logit.size(0), self.n_token))
+ head_logprob = nn.functional.log_softmax(head_logit, dim=1)
+
+ cutoff_values = [0] + self.cutoffs
+ for i in range(len(cutoff_values) - 1):
+ start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
+
+ if i == 0:
+ out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
+ else:
+ weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
+
+ tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
+ tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
+
+ # use the same cluster index as in forward() and broadcast over the tail slice
+ logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
+ out[:, start_idx:stop_idx] = logprob_i
+
+ return out
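To make the cluster factorization above concrete, here is a self-contained sketch (the sizes, names, and single tail cluster are illustrative, not part of the module): the log-probability of a rare token is the head's log-probability of its cluster plus the within-cluster log-probability.

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
bsz, d_model = 3, 8
shortlist, tail = 10, 6  # vocab = 10 frequent tokens + 6 rare tokens in one tail cluster
hidden = torch.randn(bsz, d_model)

head_w = torch.randn(shortlist + 1, d_model)  # +1 row: the "routing" logit of the tail cluster
tail_w = torch.randn(tail, d_model)

head_logprob = F.log_softmax(hidden @ head_w.t(), dim=-1)  # [bsz, shortlist + 1]
tail_logprob = F.log_softmax(hidden @ tail_w.t(), dim=-1)  # [bsz, tail]

# log P(rare token) = log P(tail cluster | h) + log P(token | tail cluster, h)
full_logprob = torch.cat(
    [head_logprob[:, :shortlist], head_logprob[:, -1:] + tail_logprob], dim=-1
)
# The factorized distribution is properly normalized over the full vocabulary.
assert torch.allclose(full_logprob.exp().sum(-1), torch.ones(bsz))
```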
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..12d360076fba4f4bf069a365c62d7dc9629812ce
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
@@ -0,0 +1,830 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Tokenization classes for Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl.
+"""
+
+
+import glob
+import os
+import pickle
+import re
+from collections import Counter, OrderedDict
+from typing import List, Optional, Tuple
+
+import numpy as np
+
+from ....tokenization_utils import PreTrainedTokenizer
+from ....utils import (
+ cached_file,
+ is_sacremoses_available,
+ is_torch_available,
+ logging,
+ requires_backends,
+ strtobool,
+ torch_only_method,
+)
+
+
+if is_sacremoses_available():
+ import sacremoses as sm
+
+
+if is_torch_available():
+ import torch
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "pretrained_vocab_file": "vocab.pkl",
+ "pretrained_vocab_file_torch": "vocab.bin",
+ "vocab_file": "vocab.txt",
+}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "pretrained_vocab_file": {
+ "transfo-xl/transfo-xl-wt103": "https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/vocab.pkl",
+ }
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "transfo-xl/transfo-xl-wt103": None,
+}
+
+PRETRAINED_CORPUS_ARCHIVE_MAP = {
+ "transfo-xl/transfo-xl-wt103": "https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/corpus.bin",
+}
+CORPUS_NAME = "corpus.bin"
+
+MATCH_NUMBERS = r"(?<=\d)[,.](?=\d)", r" @\g<0>@ "
+DETOKENIZE_NUMBERS = [(r" @\,@ ", r","), (r" @\.@ ", r".")]
+
+
+def tokenize_numbers(text_array: List[str]) -> List[str]:
+ """
+ Splits large comma-separated numbers and floating point values. This is done by replacing commas with ' @,@ ' and
+ dots with ' @.@ '.
+
+ Args:
+ text_array: An already tokenized text as list.
+
+ Returns:
+ A list of strings with tokenized numbers.
+
+ Example:
+
+ ```python
+ >>> tokenize_numbers(["$", "5,000", "1.73", "m"])
+ ['$', '5', '@,@', '000', '1', '@.@', '73', 'm']
+ ```"""
+ tokenized = []
+ for i in range(len(text_array)):
+ reg, sub = MATCH_NUMBERS
+ replaced = re.sub(reg, sub, text_array[i]).split()
+ tokenized.extend(replaced)
+
+ return tokenized
+
+
+def detokenize_numbers(text: str) -> str:
+ """
+ Inverts the operation of *tokenize_numbers*. This replaces ' @,@ ' and ' @.@ ' by ',' and '.'.
+
+ Args:
+ text: A string where the number should be detokenized.
+
+ Returns:
+ A detokenized string.
+
+ Example:
+
+ ```python
+ >>> detokenize_numbers("$ 5 @,@ 000 1 @.@ 73 m")
+ '$ 5,000 1.73 m'
+ ```"""
+ for reg, sub in DETOKENIZE_NUMBERS:
+ text = re.sub(reg, sub, text)
+ return text
+
+
+class TransfoXLTokenizer(PreTrainedTokenizer):
+ """
+ Construct a Transformer-XL tokenizer adapted from Vocab class in [the original
+ code](https://github.com/kimiyoung/transformer-xl). The Transformer-XL tokenizer is a word-level tokenizer (no
+ sub-word tokenization).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ special (`List[str]`, *optional*):
+ A list of special tokens (to be treated by the original implementation of this tokenizer).
+ min_freq (`int`, *optional*, defaults to 0):
+ The minimum number of times a token has to be present in order to be kept in the vocabulary (otherwise it
+ will be mapped to `unk_token`).
+ max_size (`int`, *optional*):
+ The maximum size of the vocabulary. If left unset, it will default to the size of the vocabulary found
+ after excluding the tokens according to the `min_freq` rule.
+ lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+ delimiter (`str`, *optional*):
+ The delimiter used between tokens.
+ vocab_file (`str`, *optional*):
+ File containing the vocabulary (from the original implementation).
+ pretrained_vocab_file (`str`, *optional*):
+ File containing the vocabulary as saved with the `save_pretrained()` method.
+ never_split (`List[str]`, *optional*):
+ List of tokens that should never be split. If no list is specified, will simply use the existing special
+ tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ eos_token (`str`, *optional*, defaults to `"<eos>"`):
+ The end of sequence token.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `['<formula>']`):
+ A list of additional special tokens (for the HuggingFace functionality).
+ language (`str`, *optional*, defaults to `"en"`):
+ The language of this tokenizer (used for Moses preprocessing).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids"]
+
+ def __init__(
+ self,
+ special=None,
+ min_freq=0,
+ max_size=None,
+ lower_case=False,
+ delimiter=None,
+ vocab_file=None,
+ pretrained_vocab_file: str = None,
+ never_split=None,
+ unk_token="",
+ eos_token="",
+ additional_special_tokens=[""],
+ language="en",
+ **kwargs,
+ ):
+ logger.error(
+ "`TransfoXL` was deprecated due to security issues linked to `pickle.load` in `TransfoXLTokenizer`. "
+ "See more details on this model's documentation page: "
+ "`https://github.com/huggingface/transformers/blob/main/docs/source/en/model_doc/transfo-xl.md`."
+ )
+
+ requires_backends(self, "sacremoses")
+ if special is None:
+ special = []
+ self.counter = Counter()
+ self.special = special
+ self.min_freq = min_freq
+ self.max_size = max_size
+ self.lower_case = lower_case
+ self.delimiter = delimiter
+ self.vocab_file = vocab_file
+ self.punctuation_symbols = '!"#$%&()*+,-./\\:;<=>?@[\\]^_`{|}~'
+ self.punction_without_space_before_pattern = re.compile(rf"[^\s][{self.punctuation_symbols}]")
+ self.punctuation_with_space_around_pattern = self._compile_space_around_punctuation_pattern()
+ self.language = language
+ self.moses_punct_normalizer = sm.MosesPunctNormalizer(language)
+ self.moses_tokenizer = sm.MosesTokenizer(language)
+ self.moses_detokenizer = sm.MosesDetokenizer(language)
+ self.idx2sym = []
+ self.sym2idx = OrderedDict()
+ # This try... catch... is not beautiful but honestly this tokenizer was not made to be used
+ # in a library like ours, at all.
+ try:
+ vocab_dict = None
+ if pretrained_vocab_file is not None:
+ # Priority on pickle files (support PyTorch and TF)
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
+ raise ValueError(
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is "
+ "potentially malicious. It's recommended to never unpickle data that could have come from an "
+ "untrusted source, or that could have been tampered with. If you already verified the pickle "
+ "data and decided to use it, you can set the environment variable "
+ "`TRUST_REMOTE_CODE` to `True` to allow it."
+ )
+ with open(pretrained_vocab_file, "rb") as f:
+ vocab_dict = pickle.load(f)
+
+ # Loading a torch-saved transfo-xl vocab dict with pickle results in an integer
+ # Entering this if statement means that we tried to load a torch-saved file with pickle, and we failed.
+ # We therefore load it with torch, if it's available.
+ if isinstance(vocab_dict, int):
+ if not is_torch_available():
+ raise ImportError(
+ "Not trying to load dict with PyTorch as you need to install pytorch to load "
+ "from a PyTorch pretrained vocabulary, "
+ "or activate it with environment variables USE_TORCH=1 and USE_TF=0."
+ )
+ vocab_dict = torch.load(pretrained_vocab_file)
+
+ if vocab_dict is not None:
+ for key, value in vocab_dict.items():
+ if key not in self.__dict__ or key in ["sym2idx", "idx2sym"]:
+ self.__dict__[key] = value
+ elif vocab_file is not None:
+ self.build_vocab()
+
+ except Exception as e:
+ raise ValueError(
+ f"Unable to parse file {pretrained_vocab_file}. Unknown format. "
+ "If you tried to load a model saved through TransfoXLTokenizerFast, "
+ "please note they are not compatible."
+ ) from e
+
+ if vocab_file is not None:
+ self.build_vocab()
+
+ super().__init__(
+ special=special,
+ min_freq=min_freq,
+ max_size=max_size,
+ lower_case=lower_case,
+ delimiter=delimiter,
+ vocab_file=vocab_file,
+ pretrained_vocab_file=pretrained_vocab_file,
+ never_split=never_split,
+ unk_token=unk_token,
+ eos_token=eos_token,
+ additional_special_tokens=additional_special_tokens,
+ language=language,
+ **kwargs,
+ )
+
+ # these are not required to initialize the parent class as only used when tokenizing.
+ if never_split is None:
+ never_split = self.all_special_tokens
+ self.never_split = never_split
+
+ @property
+ def do_lower_case(self):
+ return self.lower_case
+
+ def _compile_space_around_punctuation_pattern(self):
+ look_ahead_for_special_token = f"(?=[{self.punctuation_symbols}])"
+ look_ahead_to_match_all_except_space = r"(?=[^\s])"
+ return re.compile(r"" + look_ahead_for_special_token + look_ahead_to_match_all_except_space)
+
+ def count_file(self, path, verbose=False, add_eos=False):
+ if verbose:
+ logger.info(f"counting file {path} ...")
+ assert os.path.exists(path), f"Input file {path} not found"
+
+ sents = []
+ with open(path, "r", encoding="utf-8") as f:
+ for idx, line in enumerate(f):
+ if verbose and idx > 0 and idx % 500000 == 0:
+ logger.info(f" line {idx}")
+ symbols = self.tokenize(line, add_eos=add_eos)
+ self.counter.update(symbols)
+ sents.append(symbols)
+
+ return sents
+
+ def count_sents(self, sents, verbose=False):
+ """
+ sents : a list of sentences, each a list of tokenized symbols
+ """
+ if verbose:
+ logger.info(f"counting {len(sents)} sents ...")
+ for idx, symbols in enumerate(sents):
+ if verbose and idx > 0 and idx % 500000 == 0:
+ logger.info(f" line {idx}")
+ self.counter.update(symbols)
+
+ def _build_from_file(self, vocab_file):
+ self.idx2sym = []
+ self.sym2idx = OrderedDict()
+
+ with open(vocab_file, "r", encoding="utf-8") as f:
+ for line in f:
+ symb = line.strip().split()[0]
+ self.add_symbol(symb)
+ if "" in self.sym2idx:
+ self.unk_idx = self.sym2idx[""]
+ elif "" in self.sym2idx:
+ self.unk_idx = self.sym2idx[""]
+ else:
+ raise ValueError("Token not in vocabulary and no token in vocabulary for replacement.")
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory,
+ (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["pretrained_vocab_file"],
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "wb") as f:
+ pickle.dump(self.__dict__, f)
+ return (vocab_file,)
+
+ def build_vocab(self):
+ if self.vocab_file:
+ logger.info(f"building vocab from {self.vocab_file}")
+ self._build_from_file(self.vocab_file)
+ logger.info(f"Final vocab size {len(self.sym2idx)}")
+ else:
+ logger.info(f"building vocab with min_freq={self.min_freq}, max_size={self.max_size}")
+ self.idx2sym = []
+ self.sym2idx = OrderedDict()
+
+ for sym in self.special:
+ self.add_special(sym)
+
+ for sym, cnt in self.counter.most_common(self.max_size):
+ if cnt < self.min_freq:
+ break
+ self.add_symbol(sym)
+
+ logger.info(f"Final vocab size {len(self.sym2idx)} from {len(self.counter)} unique tokens")
+
+ @torch_only_method
+ def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False):
+ if verbose:
+ logger.info(f"encoding file {path} ...")
+ assert os.path.exists(path), f"Output file {path} not found"
+ encoded = []
+ with open(path, "r", encoding="utf-8") as f:
+ for idx, line in enumerate(f):
+ if verbose and idx > 0 and idx % 500000 == 0:
+ logger.info(f" line {idx}")
+ symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos)
+ encoded.append(self.convert_to_tensor(symbols))
+
+ if ordered:
+ encoded = torch.cat(encoded)
+
+ return encoded
+
+ @torch_only_method
+ def encode_sents(self, sents, ordered=False, verbose=False):
+ if verbose:
+ logger.info(f"encoding {len(sents)} sents ...")
+ encoded = []
+ for idx, symbols in enumerate(sents):
+ if verbose and idx > 0 and idx % 500000 == 0:
+ logger.info(f" line {idx}")
+ encoded.append(self.convert_to_tensor(symbols))
+
+ if ordered:
+ encoded = torch.cat(encoded)
+
+ return encoded
+
+ def add_special(self, sym):
+ if sym not in self.sym2idx:
+ self.idx2sym.append(sym)
+ self.sym2idx[sym] = len(self.idx2sym) - 1
+ setattr(self, f"{sym.strip('<>')}_idx", self.sym2idx[sym])
+
+ def add_symbol(self, sym):
+ if sym not in self.sym2idx:
+ self.idx2sym.append(sym)
+ self.sym2idx[sym] = len(self.idx2sym) - 1
+
+ def move_added_token(self, token: str, target_idx: int):
+ """
+ Moves an added token to a specific position in the vocab. This method should be used when resizing an embedding
+ layer other than the last one in the `AdaptiveEmbedding` in order to move the token in the tokenizer from the
+ default position (at the very end) to the desired one.
+
+ Args:
+ token: The token to move to a specific position in the vocab.
+ target_idx: The position where the token should be moved to.
+ """
+ assert token in self.added_tokens_encoder, "Token which should be moved has to be an added token"
+ assert token not in self.idx2sym, "Token which should be moved is already in vocab"
+
+ # Insert sym into vocab
+ self.idx2sym.insert(target_idx, token)
+ self.sym2idx[token] = target_idx
+
+ # Shift following indices in sym2idx
+ for idx in range(target_idx + 1, len(self.idx2sym)):
+ current_sym = self.idx2sym[idx]
+ self.sym2idx[current_sym] = idx
+
+ # Delete token from added_tokens
+ old_index = self._added_tokens_encoder.pop(token)
+ self._added_tokens_decoder.pop(old_index)
+
+ def moses_punct_norm(self, text):
+ return self.moses_punct_normalizer.normalize(text)
+
+ def moses_tokenize(self, text):
+ return self.moses_tokenizer.tokenize(
+ text, aggressive_dash_splits=True, return_str=False, escape=False, protected_patterns=self.never_split
+ )
+
+ def moses_pipeline(self, text: str) -> List[str]:
+ """
+ Does basic tokenization using [`sacremoses.MosesPunctNormalizer`] and [`sacremoses.MosesTokenizer`] with
+ *aggressive_dash_splits=True* (see [`sacremoses.tokenize.MosesTokenizer.tokenize`]). Additionally, large
+ comma-separated numbers and floating point values are split. E.g. "23,000 people are 1.80m tall" -> "23 @,@ 000
+ people are 1 @.@ 80m tall"
+
+ Args:
+ text: Text to be tokenized
+
+ Returns:
+ A list of tokenized strings
+
+ Example:
+
+ ```python
+ >>> tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
+ >>> tokenizer.moses_pipeline("23,000 people are 1.80 m tall")
+ ['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall']
+ ```"""
+ text = self.moses_punct_norm(text)
+ text = self.moses_tokenize(text)
+ text = tokenize_numbers(text)
+ return text
+
+ def _convert_id_to_token(self, idx):
+ """Converts an id in a token (BPE) using the vocab."""
+ assert 0 <= idx < len(self), f"Index {idx} out of vocabulary range"
+ return self.idx2sym[idx]
+
+ def _convert_token_to_id(self, sym):
+ """Converts a token (str) in an id using the vocab."""
+ if sym in self.sym2idx:
+ return self.sym2idx[sym]
+ else:
+ # logger.info(f'encounter unk {sym}')
+ # assert '<eos>' not in sym
+ if hasattr(self, "unk_idx"):
+ return self.sym2idx.get(sym, self.unk_idx)
+ # Backward compatibility with pre-trained models
+ elif "<unk>" in self.sym2idx:
+ return self.sym2idx["<unk>"]
+ elif "<UNK>" in self.sym2idx:
+ return self.sym2idx["<UNK>"]
+ else:
+ raise ValueError("Token not in vocabulary and no <unk> token in vocabulary for replacement.")
+
+ def convert_tokens_to_string(self, tokens):
+ """
+ Converts a sequence of tokens (string) into a single string. Additionally, the split numbers are converted
+ back into their original form.
+ """
+ out_string = self.moses_detokenizer.detokenize(tokens)
+ return detokenize_numbers(out_string).strip()
+
+ @torch_only_method
+ def convert_to_tensor(self, symbols):
+ return torch.LongTensor(self.convert_tokens_to_ids(symbols))
+
+ @property
+ def vocab_size(self):
+ return len(self.idx2sym)
+
+ def get_vocab(self):
+ vocab = self.sym2idx.copy()
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, line, add_eos=False, add_double_eos=False):
+ line = line.strip()
+ # convert to lower case
+ if self.lower_case:
+ line = line.lower()
+
+ # empty delimiter '' will evaluate False
+ if self.delimiter == "":
+ symbols = line
+ else:
+ symbols = self.moses_pipeline(line)
+
+ if add_double_eos: # lm1b
+ return ["<S>"] + symbols + ["<S>"]
+ elif add_eos:
+ return symbols + ["<eos>"]
+ else:
+ return symbols
+
+
+class LMOrderedIterator(object):
+ def __init__(self, data, bsz, bptt, device="cpu", ext_len=None):
+ """
+ data -- LongTensor -- the LongTensor is strictly ordered
+ """
+ self.bsz = bsz
+ self.bptt = bptt
+ self.ext_len = ext_len if ext_len is not None else 0
+
+ self.device = device
+
+ # Work out how cleanly we can divide the dataset into bsz parts.
+ self.n_step = data.size(0) // bsz
+
+ # Trim off any extra elements that wouldn't cleanly fit (remainders).
+ data = data.narrow(0, 0, self.n_step * bsz)
+
+ # Evenly divide the data across the bsz batches.
+ self.data = data.view(bsz, -1).t().contiguous().to(device)
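+ # e.g. 20 tokens with bsz=2 -> self.data has shape [10, 2]: column 0 holds
+ # tokens 0..9 and column 1 tokens 10..19, i.e. one contiguous stream per column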
+
+ # Number of mini-batches
+ self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
+
+ def get_batch(self, i, bptt=None):
+ if bptt is None:
+ bptt = self.bptt
+ seq_len = min(bptt, self.data.size(0) - 1 - i)
+
+ end_idx = i + seq_len
+ beg_idx = max(0, i - self.ext_len)
+
+ data = self.data[beg_idx:end_idx]
+ target = self.data[i + 1 : i + 1 + seq_len]
+
+ data_out = data.transpose(0, 1).contiguous().to(self.device)
+ target_out = target.transpose(0, 1).contiguous().to(self.device)
+
+ return data_out, target_out, seq_len
+
+ def get_fixlen_iter(self, start=0):
+ for i in range(start, self.data.size(0) - 1, self.bptt):
+ yield self.get_batch(i)
+
+ def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
+ max_len = self.bptt + max_deviation * std
+ i = start
+ while True:
+ bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
+ bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
+ data, target, seq_len = self.get_batch(i, bptt)
+ i += seq_len
+ yield data, target, seq_len
+ if i >= self.data.size(0) - 2:
+ break
+
+ def __iter__(self):
+ return self.get_fixlen_iter()
+
+
+class LMShuffledIterator(object):
+ def __init__(self, data, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
+ """
+ data -- list[LongTensor] -- there is no order among the LongTensors
+ """
+ self.data = data
+
+ self.bsz = bsz
+ self.bptt = bptt
+ self.ext_len = ext_len if ext_len is not None else 0
+
+ self.device = device
+ self.shuffle = shuffle
+
+ def get_sent_stream(self):
+ # index iterator
+ epoch_indices = np.random.permutation(len(self.data)) if self.shuffle else np.array(range(len(self.data)))
+
+ # sentence iterator
+ for idx in epoch_indices:
+ yield self.data[idx]
+
+ @torch_only_method
+ def stream_iterator(self, sent_stream):
+ # streams for each data in the batch
+ streams = [None] * self.bsz
+
+ data = torch.LongTensor(self.bptt, self.bsz)
+ target = torch.LongTensor(self.bptt, self.bsz)
+
+ n_retain = 0
+
+ while True:
+ # data : [n_retain+bptt x bsz]
+ # target : [bptt x bsz]
+ data[n_retain:].fill_(-1)
+ target.fill_(-1)
+
+ valid_batch = True
+
+ for i in range(self.bsz):
+ n_filled = 0
+ try:
+ while n_filled < self.bptt:
+ if streams[i] is None or len(streams[i]) <= 1:
+ streams[i] = next(sent_stream)
+ # number of new tokens to fill in
+ n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
+ # first n_retain tokens are retained from last batch
+ data[n_retain + n_filled : n_retain + n_filled + n_new, i] = streams[i][:n_new]
+ target[n_filled : n_filled + n_new, i] = streams[i][1 : n_new + 1]
+ streams[i] = streams[i][n_new:]
+ n_filled += n_new
+ except StopIteration:
+ valid_batch = False
+ break
+
+ if not valid_batch:
+ return
+
+ data_out = data.transpose(0, 1).contiguous().to(self.device)
+ target_out = target.transpose(0, 1).contiguous().to(self.device)
+
+ yield data_out, target_out, self.bptt
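+ # Retain the last ext_len tokens as left context for the next batch
+ # (mirrors LMOrderedIterator's beg_idx = i - ext_len overlap)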
+
+ n_retain = min(data.size(0), self.ext_len)
+ if n_retain > 0:
+ data[:n_retain] = data[-n_retain:]
+ data.resize_(n_retain + self.bptt, data.size(1))
+
+ def __iter__(self):
+ # sent_stream is an iterator
+ sent_stream = self.get_sent_stream()
+
+ for batch in self.stream_iterator(sent_stream):
+ yield batch
+
+
+class LMMultiFileIterator(LMShuffledIterator):
+ def __init__(self, paths, vocab, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
+ self.paths = paths
+ self.vocab = vocab
+
+ self.bsz = bsz
+ self.bptt = bptt
+ self.ext_len = ext_len if ext_len is not None else 0
+
+ self.device = device
+ self.shuffle = shuffle
+
+ def get_sent_stream(self, path):
+ sents = self.vocab.encode_file(path, add_double_eos=True)
+ if self.shuffle:
+ np.random.shuffle(sents)
+ sent_stream = iter(sents)
+
+ return sent_stream
+
+ def __iter__(self):
+ if self.shuffle:
+ np.random.shuffle(self.paths)
+
+ for path in self.paths:
+ # sent_stream is an iterator
+ sent_stream = self.get_sent_stream(path)
+ for batch in self.stream_iterator(sent_stream):
+ yield batch
+
+
+class TransfoXLCorpus(object):
+ @classmethod
+ @torch_only_method
+ def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
+ """
+ Instantiate a pre-processed corpus.
+ """
+ vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
+ is_local = os.path.isdir(pretrained_model_name_or_path)
+ # redirect to the cache, if necessary
+ try:
+ resolved_corpus_file = cached_file(pretrained_model_name_or_path, CORPUS_NAME, cache_dir=cache_dir)
+ except EnvironmentError:
+ logger.error(
+ f"Corpus '{pretrained_model_name_or_path}' was not found in corpus list"
+ f" ({', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys())}. We assumed '{pretrained_model_name_or_path}'"
+ f" was a path or url but couldn't find files {CORPUS_NAME} at this path or url."
+ )
+ return None
+ if is_local:
+ logger.info(f"loading corpus file {resolved_corpus_file}")
+ else:
+ logger.info(f"loading corpus file {CORPUS_NAME} from cache at {resolved_corpus_file}")
+
+ # Instantiate the corpus and populate it from the cached file.
+ corpus = cls(*inputs, **kwargs)
+ corpus_dict = torch.load(resolved_corpus_file)
+ for key, value in corpus_dict.items():
+ corpus.__dict__[key] = value
+ corpus.vocab = vocab
+ if corpus.train is not None:
+ corpus.train = torch.tensor(corpus.train, dtype=torch.long)
+ if corpus.valid is not None:
+ corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
+ if corpus.test is not None:
+ corpus.test = torch.tensor(corpus.test, dtype=torch.long)
+ return corpus
+
+ def __init__(self, *args, **kwargs):
+ self.vocab = TransfoXLTokenizer(*args, **kwargs)
+ self.dataset = None
+ self.train = None
+ self.valid = None
+ self.test = None
+
+ def build_corpus(self, path, dataset):
+ self.dataset = dataset
+
+ if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
+ self.vocab.count_file(os.path.join(path, "train.txt"))
+ self.vocab.count_file(os.path.join(path, "valid.txt"))
+ self.vocab.count_file(os.path.join(path, "test.txt"))
+ elif self.dataset == "wt103":
+ self.vocab.count_file(os.path.join(path, "train.txt"))
+ elif self.dataset == "lm1b":
+ train_path_pattern = os.path.join(
+ path,
+ "1-billion-word-language-modeling-benchmark-r13output",
+ "training-monolingual.tokenized.shuffled",
+ "news.en-*",
+ )
+ train_paths = glob.glob(train_path_pattern)
+ # the vocab will load from file when build_vocab() is called
+
+ self.vocab.build_vocab()
+
+ if self.dataset in ["ptb", "wt2", "wt103"]:
+ self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True)
+ self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True)
+ self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True)
+ elif self.dataset in ["enwik8", "text8"]:
+ self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True, add_eos=False)
+ self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True, add_eos=False)
+ self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True, add_eos=False)
+ elif self.dataset == "lm1b":
+ self.train = train_paths
+ self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=False, add_double_eos=True)
+ self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=False, add_double_eos=True)
+
+ def get_iterator(self, split, *args, **kwargs):
+ if split == "train":
+ if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
+ data_iter = LMOrderedIterator(self.train, *args, **kwargs)
+ elif self.dataset == "lm1b":
+ kwargs["shuffle"] = True
+ data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
+ elif split in ["valid", "test"]:
+ data = self.valid if split == "valid" else self.test
+ if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
+ data_iter = LMOrderedIterator(data, *args, **kwargs)
+ elif self.dataset == "lm1b":
+ data_iter = LMShuffledIterator(data, *args, **kwargs)
+ else:
+ data_iter = None
+ raise ValueError(f"Split not recognized: {split}")
+
+ return data_iter
+
+
+@torch_only_method
+def get_lm_corpus(datadir, dataset):
+ fn = os.path.join(datadir, "cache.pt")
+ fn_pickle = os.path.join(datadir, "cache.pkl")
+ if os.path.exists(fn):
+ logger.info("Loading cached dataset...")
+ corpus = torch.load(fn)
+ elif os.path.exists(fn_pickle):
+ logger.info("Loading cached dataset from pickle...")
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
+ raise ValueError(
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
+ "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
+ "that could have been tampered with. If you already verified the pickle data and decided to use it, "
+ "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
+ )
+ with open(fn, "rb") as fp:
+ corpus = pickle.load(fp)
+ else:
+ logger.info(f"Producing dataset {dataset}...")
+ kwargs = {}
+ if dataset in ["wt103", "wt2"]:
+ kwargs["special"] = [""]
+ kwargs["lower_case"] = False
+ elif dataset == "ptb":
+ kwargs["special"] = [""]
+ kwargs["lower_case"] = True
+ elif dataset == "lm1b":
+ kwargs["special"] = []
+ kwargs["lower_case"] = False
+ kwargs["vocab_file"] = os.path.join(datadir, "1b_word_vocab.txt")
+ elif dataset in ["enwik8", "text8"]:
+ pass
+
+ corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
+ torch.save(corpus, fn)
+
+ return corpus
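A short sketch exercising the helpers defined in this file (it assumes the module is importable; the toy tensor and sizes are illustrative):

```python
import torch

# Number splitting and its inverse (pure functions, no vocabulary needed).
toks = tokenize_numbers(["$", "5,000", "1.73", "m"])
assert toks == ["$", "5", "@,@", "000", "1", "@.@", "73", "m"]
assert detokenize_numbers(" ".join(toks)) == "$ 5,000 1.73 m"

# Ordered batching: a stream of 20 token ids, batch size 2, bptt 4.
it = LMOrderedIterator(torch.arange(20), bsz=2, bptt=4)
inp, tgt, seq_len = next(iter(it))
assert inp.shape == (2, 4)  # [bsz, bptt]
# Targets are the inputs shifted by one position within each stream.
assert torch.equal(inp[:, 1:], tgt[:, :-1])
```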
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2db730984ffa031458589f1cc6c6c1944eba0e91
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__init__.py
@@ -0,0 +1,54 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_van"] = [
+ "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "VanForImageClassification",
+ "VanModel",
+ "VanPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_van import (
+ VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
+ VanForImageClassification,
+ VanModel,
+ VanPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e63a72165709de76b6aaf4c2997f4204a418ba0
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/configuration_van.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/configuration_van.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c6c9dab3f0f0d1af16d01a23e09f64a57cdb769
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/configuration_van.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/convert_van_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/convert_van_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c89f9553c3a5eb1deb189556d4eca8be2a79242d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/convert_van_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/modeling_van.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/modeling_van.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b5945589e1895e7ea97af3f57b80868bfe4d3d5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/__pycache__/modeling_van.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/configuration_van.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/configuration_van.py
new file mode 100644
index 0000000000000000000000000000000000000000..85f228193c450e4cbb8a5c992176a75daef1010f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/configuration_van.py
@@ -0,0 +1,113 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" VAN model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "Visual-Attention-Network/van-base": (
+ "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
+ ),
+}
+
+
+class VanConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`VanModel`]. It is used to instantiate a VAN model
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the VAN
+ [Visual-Attention-Network/van-base](https://huggingface.co/Visual-Attention-Network/van-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
+ Patch size to use in each stage's embedding layer.
+ strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
+ Stride size to use in each stage's embedding layer to downsample the input.
+ hidden_sizes (`List[int]`, *optional*, defaults to `[64, 128, 320, 512]`):
+ Dimensionality (hidden size) at each stage.
+ depths (`List[int]`, *optional*, defaults to `[3, 3, 12, 3]`):
+ Depth (number of layers) for each stage.
+ mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`):
+ The expansion ratio for mlp layer at each stage.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in each layer. If string, `"gelu"`, `"relu"`,
+ `"selu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ layer_scale_init_value (`float`, *optional*, defaults to 0.01):
+ The initial value for layer scaling.
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
+ The dropout probability for stochastic depth.
+ dropout_rate (`float`, *optional*, defaults to 0.0):
+ The dropout probability for dropout.
+
+ Example:
+ ```python
+ >>> from transformers import VanModel, VanConfig
+
+ >>> # Initializing a VAN van-base style configuration
+ >>> configuration = VanConfig()
+ >>> # Initializing a model from the van-base style configuration
+ >>> model = VanModel(configuration)
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "van"
+
+ def __init__(
+ self,
+ image_size=224,
+ num_channels=3,
+ patch_sizes=[7, 3, 3, 3],
+ strides=[4, 2, 2, 2],
+ hidden_sizes=[64, 128, 320, 512],
+ depths=[3, 3, 12, 3],
+ mlp_ratios=[8, 8, 4, 4],
+ hidden_act="gelu",
+ initializer_range=0.02,
+ layer_norm_eps=1e-6,
+ layer_scale_init_value=1e-2,
+ drop_path_rate=0.0,
+ dropout_rate=0.0,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.image_size = image_size
+ self.num_channels = num_channels
+ self.patch_sizes = patch_sizes
+ self.strides = strides
+ self.hidden_sizes = hidden_sizes
+ self.depths = depths
+ self.mlp_ratios = mlp_ratios
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.layer_scale_init_value = layer_scale_init_value
+ self.drop_path_rate = drop_path_rate
+ self.dropout_rate = dropout_rate
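+
+
+# Editorial sketch (not part of the original file): the stage-level arguments are
+# parallel lists with one entry per stage, so smaller variants shrink them together,
+# e.g. the van-tiny layout used by the conversion script:
+#
+#   config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[3, 3, 5, 2])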
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/convert_van_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/convert_van_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..20492e42be2043d50e39b7573fc4e9fca05c7d32
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/convert_van_to_pytorch.py
@@ -0,0 +1,291 @@
+# coding=utf-8
+# Copyright 2022 BNRist (Tsinghua University), TKLNDST (Nankai University) and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert VAN checkpoints from the original repository.
+
+URL: https://github.com/Visual-Attention-Network/VAN-Classification"""
+
+
+import argparse
+import json
+import sys
+from dataclasses import dataclass, field
+from functools import partial
+from pathlib import Path
+from typing import List, Optional
+
+import torch
+import torch.nn as nn
+from huggingface_hub import cached_download, hf_hub_download
+from torch import Tensor
+
+from transformers import AutoImageProcessor, VanConfig, VanForImageClassification
+from transformers.models.deprecated.van.modeling_van import VanLayerScaling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+@dataclass
+class Tracker:
+ module: nn.Module
+ traced: List[nn.Module] = field(default_factory=list)
+ handles: list = field(default_factory=list)
+
+ def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
+ has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
+ if has_not_submodules:
+ if not isinstance(m, VanLayerScaling):
+ self.traced.append(m)
+
+ def __call__(self, x: Tensor):
+ for m in self.module.modules():
+ self.handles.append(m.register_forward_hook(self._forward_hook))
+ self.module(x)
+ for handle in self.handles:
+ handle.remove()
+ return self
+
+ @property
+ def parametrized(self):
+ # check the len of the state_dict keys to see if we have learnable params
+ return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
+
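+# Editorial usage sketch for `Tracker` (hypothetical model): run a dummy input through
+# a module and collect the leaf submodules that actually hold parameters:
+#
+#   model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.BatchNorm2d(8))
+#   leaves = Tracker(model)(torch.randn(1, 3, 32, 32)).parametrized
+#   # -> [Conv2d(...), BatchNorm2d(...)]; the ReLU is traced but has no parameters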
+
+@dataclass
+class ModuleTransfer:
+ src: nn.Module
+ dest: nn.Module
+ verbose: int = 0
+ src_skip: List = field(default_factory=list)
+ dest_skip: List = field(default_factory=list)
+
+ def __call__(self, x: Tensor):
+ """
+ Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
+ hood we track all the operations in both modules.
+ """
+ dest_traced = Tracker(self.dest)(x).parametrized
+ src_traced = Tracker(self.src)(x).parametrized
+
+ src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
+ dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
+
+ if len(dest_traced) != len(src_traced):
+ raise Exception(
+ f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
+ f" destination module has {len(dest_traced)}."
+ )
+
+ for dest_m, src_m in zip(dest_traced, src_traced):
+ dest_m.load_state_dict(src_m.state_dict())
+ if self.verbose == 1:
+ print(f"Transfered from={src_m} to={dest_m}")
+
+
+def copy_parameters(from_model: nn.Module, our_model: nn.Module) -> nn.Module:
+ # nn.Parameter objects cannot be tracked by the Tracker, so we need to copy them manually
+ from_state_dict = from_model.state_dict()
+ our_state_dict = our_model.state_dict()
+ config = our_model.config
+ all_keys = []
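+ # e.g. for stage 0, block 0: "block1.0.layer_scale_1" in the original checkpoint
+ # maps to "van.encoder.stages.0.layers.0.attention_scaling.weight" in our model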
+ for stage_idx in range(len(config.hidden_sizes)):
+ for block_id in range(config.depths[stage_idx]):
+ from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_1"
+ to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.attention_scaling.weight"
+
+ all_keys.append((from_key, to_key))
+ from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_2"
+ to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.mlp_scaling.weight"
+
+ all_keys.append((from_key, to_key))
+
+ for from_key, to_key in all_keys:
+ our_state_dict[to_key] = from_state_dict.pop(from_key)
+
+ our_model.load_state_dict(our_state_dict)
+ return our_model
+
+
+def convert_weight_and_push(
+ name: str,
+ config: VanConfig,
+ checkpoint: str,
+ from_model: nn.Module,
+ save_directory: Path,
+ push_to_hub: bool = True,
+):
+ print(f"Downloading weights for {name}...")
+ checkpoint_path = cached_download(checkpoint)
+ print(f"Converting {name}...")
+ from_state_dict = torch.load(checkpoint_path)["state_dict"]
+ from_model.load_state_dict(from_state_dict)
+ from_model.eval()
+ with torch.no_grad():
+ our_model = VanForImageClassification(config).eval()
+ module_transfer = ModuleTransfer(src=from_model, dest=our_model)
+ x = torch.randn((1, 3, 224, 224))
+ module_transfer(x)
+ our_model = copy_parameters(from_model, our_model)
+
+ if not torch.allclose(from_model(x), our_model(x).logits):
+ raise ValueError("The model logits don't match the original one.")
+
+ checkpoint_name = name
+ print(checkpoint_name)
+
+ if push_to_hub:
+ our_model.push_to_hub(
+ repo_path_or_name=save_directory / checkpoint_name,
+ commit_message="Add model",
+ use_temp_dir=True,
+ )
+
+ # we can use the convnext one
+ image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
+ image_processor.push_to_hub(
+ repo_path_or_name=save_directory / checkpoint_name,
+ commit_message="Add image processor",
+ use_temp_dir=True,
+ )
+
+ print(f"Pushed {checkpoint_name}")
+
+
+def convert_weights_and_push(save_directory: Path, model_name: Optional[str] = None, push_to_hub: bool = True):
+ filename = "imagenet-1k-id2label.json"
+ num_labels = 1000
+
+ repo_id = "huggingface/label-files"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+
+ label2id = {v: k for k, v in id2label.items()}
+
+ ImageNetPreTrainedConfig = partial(VanConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
+
+ names_to_config = {
+ "van-tiny": ImageNetPreTrainedConfig(
+ hidden_sizes=[32, 64, 160, 256],
+ depths=[3, 3, 5, 2],
+ mlp_ratios=[8, 8, 4, 4],
+ ),
+ "van-small": ImageNetPreTrainedConfig(
+ hidden_sizes=[64, 128, 320, 512],
+ depths=[2, 2, 4, 2],
+ mlp_ratios=[8, 8, 4, 4],
+ ),
+ "van-base": ImageNetPreTrainedConfig(
+ hidden_sizes=[64, 128, 320, 512],
+ depths=[3, 3, 12, 3],
+ mlp_ratios=[8, 8, 4, 4],
+ ),
+ "van-large": ImageNetPreTrainedConfig(
+ hidden_sizes=[64, 128, 320, 512],
+ depths=[3, 5, 27, 3],
+ mlp_ratios=[8, 8, 4, 4],
+ ),
+ }
+
+ names_to_original_models = {
+ "van-tiny": van_tiny,
+ "van-small": van_small,
+ "van-base": van_base,
+ "van-large": van_large,
+ }
+
+ names_to_original_checkpoints = {
+ "van-tiny": (
+ "https://huggingface.co/Visual-Attention-Network/VAN-Tiny-original/resolve/main/van_tiny_754.pth.tar"
+ ),
+ "van-small": (
+ "https://huggingface.co/Visual-Attention-Network/VAN-Small-original/resolve/main/van_small_811.pth.tar"
+ ),
+ "van-base": (
+ "https://huggingface.co/Visual-Attention-Network/VAN-Base-original/resolve/main/van_base_828.pth.tar"
+ ),
+ "van-large": (
+ "https://huggingface.co/Visual-Attention-Network/VAN-Large-original/resolve/main/van_large_839.pth.tar"
+ ),
+ }
+
+ if model_name:
+ convert_weight_and_push(
+ model_name,
+ names_to_config[model_name],
+ checkpoint=names_to_original_checkpoints[model_name],
+ from_model=names_to_original_models[model_name](),
+ save_directory=save_directory,
+ push_to_hub=push_to_hub,
+ )
+ else:
+ for model_name, config in names_to_config.items():
+ convert_weight_and_push(
+ model_name,
+ config,
+ checkpoint=names_to_original_checkpoints[model_name],
+ from_model=names_to_original_models[model_name](),
+ save_directory=save_directory,
+ push_to_hub=push_to_hub,
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--model-name",
+ default=None,
+ type=str,
+ help=(
+ "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
+ " currently: van-tiny/small/base/large. If `None`, all of them will the converted."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=Path,
+ required=True,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument(
+ "--van_dir",
+ required=True,
+ type=Path,
+ help=(
+ "A path to VAN's original implementation directory. You can download from here:"
+ " https://github.com/Visual-Attention-Network/VAN-Classification"
+ ),
+ )
+ parser.add_argument(
+ "--push_to_hub",
+ default=True,
+ type=bool,
+ required=False,
+ help="If True, push model and image processor to the hub.",
+ )
+
+ args = parser.parse_args()
+ pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
+ pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
+ van_dir = args.van_dir
+ # append the parent of the VAN directory to the path so the `van` package can be imported
+ sys.path.append(str(van_dir.parent))
+ from van.models.van import van_base, van_large, van_small, van_tiny
+
+ convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
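+
+# Example invocation (editorial sketch; assumes the original VAN repository has been
+# cloned next to the working directory):
+#
+#   python convert_van_to_pytorch.py \
+#       --pytorch_dump_folder_path ./converted \
+#       --van_dir ./VAN-Classification \
+#       --model-name van-base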
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/modeling_van.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/modeling_van.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0f88467e1e75b6fed5db090c6090360ceb55c08
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/van/modeling_van.py
@@ -0,0 +1,543 @@
+# coding=utf-8
+# Copyright 2022 BNRist (Tsinghua University), TKLNDST (Nankai University) and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Visual Attention Network (VAN) model."""
+
+import math
+from collections import OrderedDict
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ....activations import ACT2FN
+from ....modeling_outputs import (
+ BaseModelOutputWithNoAttention,
+ BaseModelOutputWithPoolingAndNoAttention,
+ ImageClassifierOutputWithNoAttention,
+)
+from ....modeling_utils import PreTrainedModel
+from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_van import VanConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "VanConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "Visual-Attention-Network/van-base"
+_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "Visual-Attention-Network/van-base"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+VAN_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "Visual-Attention-Network/van-base",
+ # See all VAN models at https://huggingface.co/models?filter=van
+]
+
+
+# Copied from transformers.models.convnext.modeling_convnext.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
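+# Editorial illustration (not part of the original file): with drop_prob=0.2 and a
+# (4, 8, 7, 7) input, `random_tensor` becomes a per-sample 0/1 mask of shape
+# (4, 1, 1, 1); surviving samples are rescaled by 1/keep_prob so the expected value
+# of the output matches the input:
+#
+#   y = drop_path(torch.randn(4, 8, 7, 7), drop_prob=0.2, training=True)
+#   # each y[i] is either all zeros or the corresponding input sample divided by 0.8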
+
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Van
+class VanDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class VanOverlappingPatchEmbedder(nn.Module):
+ """
+ Downsamples the input using a patchify operation with a `stride` of 4 by default, making adjacent windows overlap by
+ half of the area. From [PVTv2: Improved Baselines with Pyramid Vision
+ Transformer](https://arxiv.org/abs/2106.13797).
+ """
+
+ def __init__(self, in_channels: int, hidden_size: int, patch_size: int = 7, stride: int = 4):
+ super().__init__()
+ self.convolution = nn.Conv2d(
+ in_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=patch_size // 2
+ )
+ self.normalization = nn.BatchNorm2d(hidden_size)
+
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.convolution(input)
+ hidden_state = self.normalization(hidden_state)
+ return hidden_state
+
+
+class VanMlpLayer(nn.Module):
+ """
+ MLP with depth-wise convolution, from [PVTv2: Improved Baselines with Pyramid Vision
+ Transformer](https://arxiv.org/abs/2106.13797).
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ hidden_size: int,
+ out_channels: int,
+ hidden_act: str = "gelu",
+ dropout_rate: float = 0.5,
+ ):
+ super().__init__()
+ self.in_dense = nn.Conv2d(in_channels, hidden_size, kernel_size=1)
+ self.depth_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=1, groups=hidden_size)
+ self.activation = ACT2FN[hidden_act]
+ self.dropout1 = nn.Dropout(dropout_rate)
+ self.out_dense = nn.Conv2d(hidden_size, out_channels, kernel_size=1)
+ self.dropout2 = nn.Dropout(dropout_rate)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.in_dense(hidden_state)
+ hidden_state = self.depth_wise(hidden_state)
+ hidden_state = self.activation(hidden_state)
+ hidden_state = self.dropout1(hidden_state)
+ hidden_state = self.out_dense(hidden_state)
+ hidden_state = self.dropout2(hidden_state)
+ return hidden_state
+
+
+class VanLargeKernelAttention(nn.Module):
+ """
+ Basic Large Kernel Attention (LKA).
+ """
+
+ def __init__(self, hidden_size: int):
+ super().__init__()
+ self.depth_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=5, padding=2, groups=hidden_size)
+ self.depth_wise_dilated = nn.Conv2d(
+ hidden_size, hidden_size, kernel_size=7, dilation=3, padding=9, groups=hidden_size
+ )
+ self.point_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=1)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.depth_wise(hidden_state)
+ hidden_state = self.depth_wise_dilated(hidden_state)
+ hidden_state = self.point_wise(hidden_state)
+ return hidden_state
+
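+# Editorial note: the 5x5 depth-wise conv, the 7x7 depth-wise conv with dilation 3 and
+# the 1x1 point-wise conv together emulate a single very large kernel (the VAN paper
+# decomposes a 21x21 convolution this way) at a fraction of the parameter cost.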
+
+class VanLargeKernelAttentionLayer(nn.Module):
+ """
+ Computes an attention map using Large Kernel Attention (LKA) and applies it to the input.
+ """
+
+ def __init__(self, hidden_size: int):
+ super().__init__()
+ self.attention = VanLargeKernelAttention(hidden_size)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ attention = self.attention(hidden_state)
+ attended = hidden_state * attention
+ return attended
+
+
+class VanSpatialAttentionLayer(nn.Module):
+ """
+ Van spatial attention layer composed of projection (via conv) -> act -> Large Kernel Attention (LKA) ->
+ projection (via conv) + residual connection.
+ """
+
+ def __init__(self, hidden_size: int, hidden_act: str = "gelu"):
+ super().__init__()
+ self.pre_projection = nn.Sequential(
+ OrderedDict(
+ [
+ ("conv", nn.Conv2d(hidden_size, hidden_size, kernel_size=1)),
+ ("act", ACT2FN[hidden_act]),
+ ]
+ )
+ )
+ self.attention_layer = VanLargeKernelAttentionLayer(hidden_size)
+ self.post_projection = nn.Conv2d(hidden_size, hidden_size, kernel_size=1)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ residual = hidden_state
+ hidden_state = self.pre_projection(hidden_state)
+ hidden_state = self.attention_layer(hidden_state)
+ hidden_state = self.post_projection(hidden_state)
+ hidden_state = hidden_state + residual
+ return hidden_state
+
+
+class VanLayerScaling(nn.Module):
+ """
+ Scales the inputs by a learnable parameter initialized to `initial_value`.
+ """
+
+ def __init__(self, hidden_size: int, initial_value: float = 1e-2):
+ super().__init__()
+ self.weight = nn.Parameter(initial_value * torch.ones((hidden_size)), requires_grad=True)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ # unsqueezing for broadcasting
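+ # weight: (hidden_size,) -> (hidden_size, 1, 1), broadcasting over (batch, hidden_size, height, width)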
+ hidden_state = self.weight.unsqueeze(-1).unsqueeze(-1) * hidden_state
+ return hidden_state
+
+
+class VanLayer(nn.Module):
+ """
+ Van layer composed of normalization layers, large kernel attention (LKA) and a multi-layer perceptron (MLP).
+ """
+
+ def __init__(
+ self,
+ config: VanConfig,
+ hidden_size: int,
+ mlp_ratio: int = 4,
+ drop_path_rate: float = 0.5,
+ ):
+ super().__init__()
+ self.drop_path = VanDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
+ self.pre_normomalization = nn.BatchNorm2d(hidden_size)
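+ # NOTE (editorial): the misspelled attribute name `pre_normomalization` is kept as-is;
+ # renaming it would change the state_dict keys and break loading published checkpoints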
+ self.attention = VanSpatialAttentionLayer(hidden_size, config.hidden_act)
+ self.attention_scaling = VanLayerScaling(hidden_size, config.layer_scale_init_value)
+ self.post_normalization = nn.BatchNorm2d(hidden_size)
+ self.mlp = VanMlpLayer(
+ hidden_size, hidden_size * mlp_ratio, hidden_size, config.hidden_act, config.dropout_rate
+ )
+ self.mlp_scaling = VanLayerScaling(hidden_size, config.layer_scale_init_value)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ residual = hidden_state
+ # attention
+ hidden_state = self.pre_normomalization(hidden_state)
+ hidden_state = self.attention(hidden_state)
+ hidden_state = self.attention_scaling(hidden_state)
+ hidden_state = self.drop_path(hidden_state)
+ # residual connection
+ hidden_state = residual + hidden_state
+ residual = hidden_state
+ # mlp
+ hidden_state = self.post_normalization(hidden_state)
+ hidden_state = self.mlp(hidden_state)
+ hidden_state = self.mlp_scaling(hidden_state)
+ hidden_state = self.drop_path(hidden_state)
+ # residual connection
+ hidden_state = residual + hidden_state
+ return hidden_state
+
+
+class VanStage(nn.Module):
+ """
+ VanStage, consisting of multiple layers.
+ """
+
+ def __init__(
+ self,
+ config: VanConfig,
+ in_channels: int,
+ hidden_size: int,
+ patch_size: int,
+ stride: int,
+ depth: int,
+ mlp_ratio: int = 4,
+ drop_path_rate: float = 0.0,
+ ):
+ super().__init__()
+ self.embeddings = VanOverlappingPatchEmbedder(in_channels, hidden_size, patch_size, stride)
+ self.layers = nn.Sequential(
+ *[
+ VanLayer(
+ config,
+ hidden_size,
+ mlp_ratio=mlp_ratio,
+ drop_path_rate=drop_path_rate,
+ )
+ for _ in range(depth)
+ ]
+ )
+ self.normalization = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.embeddings(hidden_state)
+ hidden_state = self.layers(hidden_state)
+ # rearrange b c h w -> b (h w) c
+ batch_size, hidden_size, height, width = hidden_state.shape
+ hidden_state = hidden_state.flatten(2).transpose(1, 2)
+ hidden_state = self.normalization(hidden_state)
+ # rearrange b (h w) c -> b c h w
+ hidden_state = hidden_state.view(batch_size, height, width, hidden_size).permute(0, 3, 1, 2)
+ return hidden_state
+
+
+class VanEncoder(nn.Module):
+ """
+ VanEncoder, consisting of multiple stages.
+ """
+
+ def __init__(self, config: VanConfig):
+ super().__init__()
+ self.stages = nn.ModuleList([])
+ patch_sizes = config.patch_sizes
+ strides = config.strides
+ hidden_sizes = config.hidden_sizes
+ depths = config.depths
+ mlp_ratios = config.mlp_ratios
+ drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
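+ # NOTE (editorial): the linspace yields sum(depths) rates, but the zip below pairs just
+ # one rate with each stage, so all layers within a stage share that stage's rate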
+
+ for num_stage, (patch_size, stride, hidden_size, depth, mlp_expansion, drop_path_rate) in enumerate(
+ zip(patch_sizes, strides, hidden_sizes, depths, mlp_ratios, drop_path_rates)
+ ):
+ is_first_stage = num_stage == 0
+ in_channels = hidden_sizes[num_stage - 1]
+ if is_first_stage:
+ in_channels = config.num_channels
+ self.stages.append(
+ VanStage(
+ config,
+ in_channels,
+ hidden_size,
+ patch_size=patch_size,
+ stride=stride,
+ depth=depth,
+ mlp_ratio=mlp_expansion,
+ drop_path_rate=drop_path_rate,
+ )
+ )
+
+ def forward(
+ self,
+ hidden_state: torch.Tensor,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
+ all_hidden_states = () if output_hidden_states else None
+
+ for stage_module in self.stages:
+ hidden_state = stage_module(hidden_state)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_state, all_hidden_states] if v is not None)
+
+ return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=all_hidden_states)
+
+
+class VanPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = VanConfig
+ base_model_prefix = "van"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ nn.init.trunc_normal_(module.weight, std=self.config.initializer_range)
+ if isinstance(module, nn.Linear) and module.bias is not None:
+ nn.init.constant_(module.bias, 0)
+ elif isinstance(module, nn.LayerNorm):
+ nn.init.constant_(module.bias, 0)
+ nn.init.constant_(module.weight, 1.0)
+ elif isinstance(module, nn.Conv2d):
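+ # He / Kaiming fan-out style normal initialization for convolution weights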
+ fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
+ fan_out //= module.groups
+ module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+
+VAN_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`VanConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+VAN_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`ConvNextImageProcessor.__call__`] for details.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all stages. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding"
+ " layer.",
+ VAN_START_DOCSTRING,
+)
+class VanModel(VanPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+ self.encoder = VanEncoder(config)
+ # final layernorm layer
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VAN_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor],
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_outputs = self.encoder(
+ pixel_values,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ last_hidden_state = encoder_outputs[0]
+ # global average pooling, n c h w -> n c
+ pooled_output = last_hidden_state.mean(dim=[-2, -1])
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndNoAttention(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """
+ VAN Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+ ImageNet.
+ """,
+ VAN_START_DOCSTRING,
+)
+class VanForImageClassification(VanPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.van = VanModel(config)
+ # Classifier head
+ self.classifier = (
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VAN_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutputWithNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.van(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
+
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.config.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.config.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
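+
+
+# Minimal inference sketch (editorial, not part of the original file; `image` is
+# assumed to be a PIL image):
+#
+#   from transformers import AutoImageProcessor, VanForImageClassification
+#
+#   processor = AutoImageProcessor.from_pretrained("Visual-Attention-Network/van-base")
+#   model = VanForImageClassification.from_pretrained("Visual-Attention-Network/van-base")
+#   inputs = processor(images=image, return_tensors="pt")
+#   predicted = model(**inputs).logits.argmax(-1).item()
+#   print(model.config.id2label[predicted])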
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0a825dc74043fd2991cc03e7e543c3defaed14d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..faccf30b588c7d2ef999da0de4d1981053372848
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/configuration_mobilevit.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/feature_extraction_mobilevit.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/feature_extraction_mobilevit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0217be49a925a159f14a34f61c023e892905635b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/feature_extraction_mobilevit.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f9f7b70080ba98528739c5f5c6eecb2fdfcc3d0
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilevit/__pycache__/modeling_mobilevit.cpython-310.pyc differ