diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4722c8a98a586405dd4e306a4f10f62d95df6a87
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..112061abe88c5ab9d5b8276c0856e91fb3e779f7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b54bd3760feeafd41e36eb76777f29b7d1a31dd
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/configuration_blenderbot_small.py
@@ -0,0 +1,389 @@
+# coding=utf-8
+# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BlenderbotSmall model configuration"""
+
+from collections import OrderedDict
+from typing import Any, Mapping, Optional
+
+from ... import PreTrainedTokenizer
+from ...configuration_utils import PretrainedConfig
+from ...file_utils import TensorType, is_torch_available
+from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
+from ...onnx.utils import compute_effective_axis_dimension
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+from ..deprecated._archive_maps import BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class BlenderbotSmallConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`BlenderbotSmallModel`]. It is used to instantiate
+    a BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall
+ [facebook/blenderbot_small-90M](https://huggingface.co/facebook/blenderbot_small-90M) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50265):
+ Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be
+ represented by the `inputs_ids` passed when calling [`BlenderbotSmallModel`] or [`TFBlenderbotSmallModel`].
+ d_model (`int`, *optional*, defaults to 512):
+ Dimensionality of the layers and the pooler layer.
+ encoder_layers (`int`, *optional*, defaults to 8):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 8):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
+        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+            for more details.
+        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+            for more details.
+        scale_embedding (`bool`, *optional*, defaults to `False`):
+            Scale embeddings by dividing by sqrt(d_model).
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models).
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+ `eos_token_id`.
+
+ Example:
+
+ ```python
+ >>> from transformers import BlenderbotSmallConfig, BlenderbotSmallModel
+
+ >>> # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration
+ >>> configuration = BlenderbotSmallConfig()
+
+ >>> # Initializing a model (with random weights) from the facebook/blenderbot_small-90M style configuration
+ >>> model = BlenderbotSmallModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "blenderbot-small"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ def __init__(
+ self,
+ vocab_size=50265,
+ max_position_embeddings=512,
+ encoder_layers=8,
+ encoder_ffn_dim=2048,
+ encoder_attention_heads=16,
+ decoder_layers=8,
+ decoder_ffn_dim=2048,
+ decoder_attention_heads=16,
+ encoder_layerdrop=0.0,
+ decoder_layerdrop=0.0,
+ use_cache=True,
+ is_encoder_decoder=True,
+ activation_function="gelu",
+ d_model=512,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ init_std=0.02,
+ decoder_start_token_id=1,
+ scale_embedding=False,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ forced_eos_token_id=2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ decoder_start_token_id=decoder_start_token_id,
+ forced_eos_token_id=forced_eos_token_id,
+ **kwargs,
+ )
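+
+# Illustrative note (not part of the library code): because of `attribute_map`
+# above, `hidden_size` resolves to `d_model` and `num_attention_heads` to
+# `encoder_attention_heads`, e.g.:
+#
+#     cfg = BlenderbotSmallConfig(d_model=512, encoder_attention_heads=16)
+#     assert cfg.hidden_size == 512 and cfg.num_attention_heads == 16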
+
+
+# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig
+class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ]
+ )
+
+ if self.use_past:
+ common_inputs["decoder_input_ids"] = {0: "batch"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
+ else:
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
+
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+ elif self.task == "causal-lm":
+ # TODO: figure this case out.
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ]
+ )
+ if self.use_past:
+ num_encoder_layers, _ = self.num_layers
+ for i in range(num_encoder_layers):
+ common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
+ common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
+ else:
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
+ ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
+ ]
+ )
+
+ return common_inputs
+
+ @property
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_outputs = super().outputs
+ else:
+ common_outputs = super(OnnxConfigWithPast, self).outputs
+ if self.use_past:
+ num_encoder_layers, _ = self.num_layers
+ for i in range(num_encoder_layers):
+ common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
+ common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
+ return common_outputs
+
+ def _generate_dummy_inputs_for_default_and_seq2seq_lm(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, seq_length, is_pair, framework
+ )
+
+ # Generate decoder inputs
+ decoder_seq_length = seq_length if not self.use_past else 1
+ decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, decoder_seq_length, is_pair, framework
+ )
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
+
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+ batch, encoder_seq_length = common_inputs["input_ids"].shape
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
+ encoder_shape = (
+ batch,
+ num_encoder_attention_heads,
+ encoder_seq_length,
+ self._config.hidden_size // num_encoder_attention_heads,
+ )
+ decoder_past_length = decoder_seq_length + 3
+ decoder_shape = (
+ batch,
+ num_decoder_attention_heads,
+ decoder_past_length,
+ self._config.hidden_size // num_decoder_attention_heads,
+ )
+
+ common_inputs["decoder_attention_mask"] = torch.cat(
+ [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
+ )
+
+ common_inputs["past_key_values"] = []
+            # Both the encoder and the decoder layer counts from the model configuration are considered
+ num_encoder_layers, num_decoder_layers = self.num_layers
+ min_num_layers = min(num_encoder_layers, num_decoder_layers)
+ max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
+ remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
+
+ for _ in range(min_num_layers):
+ common_inputs["past_key_values"].append(
+ (
+ torch.zeros(decoder_shape),
+ torch.zeros(decoder_shape),
+ torch.zeros(encoder_shape),
+ torch.zeros(encoder_shape),
+ )
+ )
+ # TODO: test this.
+ shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
+ for _ in range(min_num_layers, max_num_layers):
+ common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
+ return common_inputs
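+
+    # Hedged shape note (illustrative, not library code): each `past_key_values`
+    # entry appended above is a 4-tuple (decoder self-attn key, decoder self-attn
+    # value, cross-attn key, cross-attn value); the decoder tensors use
+    # `decoder_past_length` on axis 2, while the cross-attention tensors use
+    # `encoder_seq_length`.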
+
+ def _generate_dummy_inputs_for_causal_lm(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, seq_length, is_pair, framework
+ )
+
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+ batch, seqlen = common_inputs["input_ids"].shape
+ # Not using the same length for past_key_values
+ past_key_values_length = seqlen + 2
+ num_encoder_layers, _ = self.num_layers
+ num_encoder_attention_heads, _ = self.num_attention_heads
+ past_shape = (
+ batch,
+ num_encoder_attention_heads,
+ past_key_values_length,
+ self._config.hidden_size // num_encoder_attention_heads,
+ )
+
+ mask_dtype = common_inputs["attention_mask"].dtype
+ common_inputs["attention_mask"] = torch.cat(
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
+ )
+ common_inputs["past_key_values"] = [
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
+ ]
+ return common_inputs
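+
+    # Hedged note (illustrative): for causal-lm the attention_mask is extended
+    # with `past_key_values_length` ones so it covers the past and the new
+    # tokens, and each `past_key_values` entry is a zero-filled (key, value)
+    # pair of shape (batch, num_heads, past_key_values_length, head_dim).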
+
+ def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ # Copied from OnnxConfig.generate_dummy_inputs
+ # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
+ batch_size = compute_effective_axis_dimension(
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
+ )
+
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
+ token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
+ seq_length = compute_effective_axis_dimension(
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
+ )
+
+ # Generate dummy inputs according to compute batch and sequence
+        dummy_input = [" ".join([tokenizer.unk_token] * seq_length)] * batch_size
+ common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
+ return common_inputs
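+
+    # Worked example (illustrative): when the dynamic axis value -1 is passed,
+    # the batch falls back to OnnxConfig.default_fixed_batch (2) and the
+    # sequence to OnnxConfig.default_fixed_sequence (8) minus the tokenizer's
+    # special tokens, so the dummy text above re-tokenizes to the fixed length.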
+
+ def generate_dummy_inputs(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+ elif self.task == "causal-lm":
+ common_inputs = self._generate_dummy_inputs_for_causal_lm(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+ else:
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+ return common_inputs
+
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
+ if self.task in ["default", "seq2seq-lm"]:
+ flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
+ else:
+ flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
+ flattened_output, name, idx, t
+ )
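+
+
+# Minimal usage sketch (illustrative; assumes the tokenizer checkpoint is
+# available):
+#
+#     from transformers import AutoTokenizer
+#
+#     config = BlenderbotSmallConfig()
+#     onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
+#     tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
+#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.TENSORFLOW)
+#     # `dummy` maps input names (input_ids, attention_mask, decoder_input_ids,
+#     # ...) to tensors sized with the fixed fallbacks described above.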
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py
new file mode 100644
index 0000000000000000000000000000000000000000..01206831ac96c3cdf3ccb0401be67111bd6d9a4b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py
@@ -0,0 +1,1526 @@
+# coding=utf-8
+# Copyright 2021 The Facebook, Inc and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 BlenderbotSmall model."""
+
+
+from __future__ import annotations
+
+import random
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPastAndCrossAttentions,
+ TFSeq2SeqLMOutput,
+ TFSeq2SeqModelOutput,
+)
+
+# Public API
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFPreTrainedModel,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_blenderbot_small import BlenderbotSmallConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M"
+_CONFIG_FOR_DOC = "BlenderbotSmallConfig"
+
+
+LARGE_NEGATIVE = -1e8
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
+def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
+ start_tokens = tf.fill(
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
+ )
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids = tf.where(
+ shifted_input_ids == -100,
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
+ shifted_input_ids,
+ )
+
+ # "Verify that `labels` has only positive values and -100"
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
+
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
+ with tf.control_dependencies([assert_gte0]):
+ shifted_input_ids = tf.identity(shifted_input_ids)
+
+ return shifted_input_ids
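+
+# Worked example (illustrative, not library code): with pad_token_id=0 and
+# decoder_start_token_id=1, labels [[5, 6, 2], [7, -100, -100]] become
+# [[1, 5, 6], [1, 7, 0]]: each row is shifted right, prefixed with the start
+# token, and any -100 placeholder is replaced by the pad token.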
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
+def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
+ """
+    Make causal mask used for uni-directional (causal) self-attention.
+ """
+ bsz = input_ids_shape[0]
+ tgt_len = input_ids_shape[1]
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
+ mask_cond = tf.range(shape_list(mask)[-1])
+
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
+
+ if past_key_values_length > 0:
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
+
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
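+
+# Worked example (illustrative): for input_ids_shape=(1, 3) and
+# past_key_values_length=0, the last two axes of the (1, 1, 3, 3) mask are
+#     [[0, -1e8, -1e8],
+#      [0,    0, -1e8],
+#      [0,    0,    0]]
+# so, once added to the attention scores, each position can only attend to
+# itself and earlier positions.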
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
+
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
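+
+# Illustrative sketch (not library code): a padding mask [[1, 1, 0]] expands to
+# shape (1, 1, tgt_len, 3) with LARGE_NEGATIVE in the last source column, so
+# the padded position receives ~zero weight after the softmax over source
+# tokens.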
+
+
+# Copied from transformers.models.blenderbot.modeling_tf_blenderbot.TFBlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
+class TFBlenderbotSmallLearnedPositionalEmbedding(keras.layers.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
+ super().__init__(num_embeddings, embedding_dim, **kwargs)
+
+ def call(
+ self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None
+ ):
+ """Input is expected to be of size [bsz x seqlen]."""
+ if position_ids is None:
+ seq_len = input_shape[1]
+ position_ids = tf.range(seq_len, delta=1, name="range")
+ position_ids += past_key_values_length
+
+ return super().call(tf.cast(position_ids, dtype=tf.int32))
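+
+# Incremental-decoding sketch (illustrative): with past_key_values_length=4 and
+# a single new token (input_shape[1] == 1), position_ids is computed as
+# [0] + 4 == [4], so the new token is embedded at absolute position 4.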
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->BlenderbotSmall
+class TFBlenderbotSmallAttention(keras.layers.Layer):
+ """Multi-headed attention from "Attention Is All You Need"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.embed_dim = embed_dim
+
+ self.num_heads = num_heads
+ self.dropout = keras.layers.Dropout(dropout)
+ self.head_dim = embed_dim // num_heads
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
+
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ key_value_states: tf.Tensor | None = None,
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
+ attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+    ) -> Tuple[tf.Tensor, tf.Tensor | None, Tuple[Tuple[tf.Tensor]] | None]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
+ key_states = tf.reshape(key_states, proj_shape)
+ value_states = tf.reshape(value_states, proj_shape)
+
+ src_len = shape_list(key_states)[1]
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_weights),
+ [bsz * self.num_heads, tgt_len, src_len],
+ message=(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {shape_list(attn_weights)}"
+ ),
+ )
+
+ if attention_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attention_mask),
+ [bsz, 1, tgt_len, src_len],
+ message=(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {shape_list(attention_mask)}"
+ ),
+ )
+
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_weights = stable_softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
+ )
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_probs = self.dropout(attn_weights, training=training)
+ attn_output = tf.matmul(attn_probs, value_states)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output),
+ [bsz * self.num_heads, tgt_len, self.head_dim],
+ message=(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {shape_list(attn_output)}"
+ ),
+ )
+
+ attn_output = tf.transpose(
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
+ )
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
+
+ return attn_output, attn_weights, past_key_value
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
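+
+# Shape sketch for the attention layer above (illustrative): with embed_dim=512
+# and num_heads=16, head_dim is 32; hidden states are reshaped from
+# (bsz, tgt_len, 512) to (bsz * 16, tgt_len, 32) before the matmul, producing
+# attention weights of shape (bsz * 16, tgt_len, src_len) softmaxed over
+# src_len.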
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartEncoderLayer with Bart->BlenderbotSmall
+class TFBlenderbotSmallEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFBlenderbotSmallAttention(
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
+ )
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: np.ndarray | tf.Tensor | None,
+ layer_head_mask: tf.Tensor | None,
+ training: Optional[bool] = False,
+    ) -> Tuple[tf.Tensor, tf.Tensor]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`
+ """
+ residual = hidden_states
+ hidden_states, self_attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(hidden_states),
+ shape_list(residual),
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
+ )
+
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ return hidden_states, self_attn_weights
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartDecoderLayer with Bart->BlenderbotSmall
+class TFBlenderbotSmallDecoderLayer(keras.layers.Layer):
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFBlenderbotSmallAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="self_attn",
+ is_decoder=True,
+ )
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.encoder_attn = TFBlenderbotSmallAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="encoder_attn",
+ is_decoder=True,
+ )
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_hidden_states: np.ndarray | tf.Tensor | None = None,
+ encoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
+ past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`tf.Tensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ `(decoder_attention_heads,)`
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
+ `(decoder_attention_heads,)`
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
+ """
+ residual = hidden_states
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ return (
+ hidden_states,
+ self_attn_weights,
+ cross_attn_weights,
+ present_key_value,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "encoder_attn", None) is not None:
+ with tf.name_scope(self.encoder_attn.name):
+ self.encoder_attn.build(None)
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
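+
+# Cache-layout note (illustrative): the decoder layer returns present_key_value
+# as (self_k, self_v), extended with (cross_k, cross_v) when encoder states are
+# given, matching the 1,2 / 3,4 cache positions referenced in the comments
+# above.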
+
+
+class TFBlenderbotSmallPreTrainedModel(TFPreTrainedModel):
+ config_class = BlenderbotSmallConfig
+ base_model_prefix = "model"
+
+
+BLENDERBOT_SMALL_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Args:
+ config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
+    Conversation example:
+
+ ```py
+ >>> from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration
+
+ >>> mname = "facebook/blenderbot_small-90M"
+    >>> model = TFBlenderbotSmallForConditionalGeneration.from_pretrained(mname)
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
+
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
+ >>> print("Human: ", UTTERANCE)
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")
+
+ >>> reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
+ what kind of carbs do they eat? i don't know much about carbs.
+
+ >>> REPLY = "I'm not sure"
+ >>> print("Human: ", REPLY)
+ >>> NEXT_UTTERANCE = (
+ ... "My friends are cool but they eat too many carbs. "
+ ... "what kind of carbs do they eat? i don't know much about carbs. "
+ ... "I'm not sure."
+ ... )
+
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")
+ >>> inputs.pop("token_type_ids")
+ >>> next_reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
+ ```
+"""
+
+BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            A default mask that ignores pad tokens will be created automatically. It is not recommended to set this
+            for most use cases.
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tf.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
+            the decoder.
+        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
+            decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`). Set to `False` during training, `True` during generation.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@keras_serializable
+class TFBlenderbotSmallEncoder(keras.layers.Layer):
+ config_class = BlenderbotSmallConfig
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`TFBlenderbotSmallEncoderLayer`].
+
+ Args:
+ config: BlenderbotSmallConfig
+ """
+
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.layerdrop = config.encoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+
+ self.embed_tokens = embed_tokens
+ self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ name="embed_positions",
+ )
+ self.layers = [TFBlenderbotSmallEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
+ self.embed_dim = config.d_model
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ inputs_embeds=None,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ """
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+            head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
+ in the config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
+ will be used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
+ in eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(input_shape)
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.layernorm_embedding(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+
+        # check attention mask and invert
+        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+        if attention_mask is not None:
+            attention_mask = _expand_mask(attention_mask)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(head_mask)[0],
+ len(self.layers),
+ message=(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(head_mask)[0]}."
+ ),
+ )
+
+ # encoder layers
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if training and (dropout_probability < self.layerdrop): # skip the layer
+ continue
+
+ hidden_states, attn = encoder_layer(
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ )
+
+ if output_attentions:
+ all_attentions += (attn,)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
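+
+    # LayerDrop sketch (illustrative): with encoder_layerdrop=0.1 and
+    # training=True, each encoder layer above is skipped independently with
+    # probability 0.1 per forward pass; at inference every layer always runs.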
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layernorm_embedding", None) is not None:
+ with tf.name_scope(self.layernorm_embedding.name):
+ self.layernorm_embedding.build([None, None, self.embed_dim])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFBlenderbotSmallDecoder(keras.layers.Layer):
+ config_class = BlenderbotSmallConfig
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBlenderbotSmallDecoderLayer`]
+
+ Args:
+ config: BlenderbotSmallConfig
+ embed_tokens: output embedding
+ """
+
+ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.padding_idx = config.pad_token_id
+ self.embed_tokens = embed_tokens
+ self.layerdrop = config.decoder_layerdrop
+ self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ name="embed_positions",
+ )
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+ self.layers = [TFBlenderbotSmallDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
+
+ self.dropout = keras.layers.Dropout(config.dropout)
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ inputs_embeds=None,
+ attention_mask=None,
+ position_ids=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
+ decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+                Whether or not to return the attention tensors of all attention layers. See `attentions` under
+                returned tensors for more detail. This argument can only be used in eager mode; in graph mode the
+                value in the config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+                for more detail. This argument can only be used in eager mode; in graph mode the value in the config
+                will be used instead.
+ return_dict (`bool`, *optional*):
+                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
+                in eager mode; in graph mode the value will always be set to `True`.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ if input_shape[-1] > 1:
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
+ else:
+ combined_attention_mask = _expand_mask(
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
+ )
+
+ if attention_mask is not None:
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
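+        # Editor's note (an illustrative sketch, assuming the `_make_causal_mask` helper defined
+        # earlier in this file): both masks are additive, so summing them combines the causal and
+        # the padding constraints. For example, with a batch of 2, 3 new tokens and 5 cached ones:
+        #
+        #     >>> mask = _make_causal_mask((2, 3), past_key_values_length=5)
+        #     >>> mask.shape  # (batch, 1, tgt_len, tgt_len + past_key_values_length)
+        #     TensorShape([2, 1, 3, 8])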
+
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
+
+ # embed positions
+ if position_ids is None:
+ positions = self.embed_positions(input_shape, past_key_values_length)
+ else:
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
+
+ hidden_states = self.layernorm_embedding(inputs_embeds) + positions
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
+ present_key_values = () if use_cache else None
+
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
+ if attn_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attn_mask)[0],
+ len(self.layers),
+ message=(
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(attn_mask)[0]}."
+ ),
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ dropout_probability = random.uniform(0, 1)
+
+ if training and (dropout_probability < self.layerdrop):
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
+ hidden_states,
+ attention_mask=combined_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ past_key_value=past_key_value,
+ )
+
+ if use_cache:
+ present_key_values += (present_key_value,)
+
+ if output_attentions:
+ all_self_attns += (layer_self_attn,)
+
+ if encoder_hidden_states is not None:
+ all_cross_attns += (layer_cross_attn,)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
+ else:
+ return TFBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=present_key_values,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layernorm_embedding", None) is not None:
+ with tf.name_scope(self.layernorm_embedding.name):
+ self.layernorm_embedding.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFBlenderbotSmallMainLayer(keras.layers.Layer):
+ config_class = BlenderbotSmallConfig
+
+ def __init__(self, config: BlenderbotSmallConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.shared = keras.layers.Embedding(
+ input_dim=config.vocab_size,
+ output_dim=config.d_model,
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
+ name="model.shared",
+ )
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
+ self.shared.load_weight_prefix = "model.shared"
+
+ self.encoder = TFBlenderbotSmallEncoder(config, self.shared, name="encoder")
+ self.decoder = TFBlenderbotSmallDecoder(config, self.shared, name="decoder")
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ decoder_position_ids=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values=None,
+ inputs_embeds=None,
+ decoder_inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ **kwargs,
+ ):
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
+ encoder_outputs = TFBaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
+ encoder_outputs = encoder_outputs.to_tuple()
+
+ decoder_outputs = self.decoder(
+ decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ # The shared/tied weights expect to be in the model base namespace
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
+ # the current one.
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
+ self.shared.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
+
+
+@add_start_docstrings(
+ "The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top.",
+ BLENDERBOT_SMALL_START_DOCSTRING,
+)
+class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel):
+ def __init__(self, config: BlenderbotSmallConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.model = TFBlenderbotSmallMainLayer(config, name="model")
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSeq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ decoder_position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ decoder_head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values: List[tf.Tensor] | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ decoder_inputs_embeds: tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=output.last_hidden_state,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
+class BiasLayer(keras.layers.Layer):
+ """
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
+ so all weights have to be registered in a layer.
+ """
+
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
+ super().__init__(name=name, **kwargs)
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
+
+ def call(self, x):
+ return x + self.bias
+
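+# Editor's sketch (illustrative, not part of the module's API): the layer broadcasts its stored
+# bias over the input, e.g. for logits of shape (batch, seq_len, vocab_size):
+#
+#     >>> bias_layer = BiasLayer(shape=[1, 5], initializer="zeros", trainable=False, name="bias")
+#     >>> out = bias_layer(tf.zeros((2, 3, 5)))  # shape stays (2, 3, 5); the bias is broadcast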
+
+@add_start_docstrings(
+ "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.",
+ BLENDERBOT_SMALL_START_DOCSTRING,
+)
+class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel, TFCausalLanguageModelingLoss):
+ _keys_to_ignore_on_load_unexpected = [
+ r"model.encoder.embed_tokens.weight",
+ r"model.decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.model = TFBlenderbotSmallMainLayer(config, name="model")
+ self.use_cache = config.use_cache
+        # final_logits_bias is registered as a buffer in pytorch, so not trainable for the sake of consistency.
+ self.bias_layer = BiasLayer(
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
+ )
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_output_embeddings(self):
+ return self.get_input_embeddings()
+
+ def set_output_embeddings(self, value):
+ self.set_input_embeddings(value)
+
+ def get_bias(self):
+ return {"final_logits_bias": self.bias_layer.bias}
+
+ def set_bias(self, value):
+ # Replaces the existing layers containing bias for correct (de)serialization.
+ vocab_size = value["final_logits_bias"].shape[-1]
+ self.bias_layer = BiasLayer(
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
+ )
+ self.bias_layer.bias.assign(value["final_logits_bias"])
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
+ def call(
+ self,
+ input_ids: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ decoder_position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ decoder_head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ encoder_outputs: Optional[TFBaseModelOutput] = None,
+ past_key_values: List[tf.Tensor] | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ decoder_inputs_embeds: tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
+ r"""
+        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+            config.vocab_size]` or `-100` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ """
+
+ if labels is not None:
+ labels = tf.where(
+ labels == self.config.pad_token_id,
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
+ labels,
+ )
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
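+                # Editor's note (illustrative): with, say, pad_token_id=0 and
+                # decoder_start_token_id=1, labels [[5, 6, 2]] yield decoder_input_ids
+                # [[1, 5, 6]], i.e. the labels shifted one step to the right so that each
+                # target token is predicted from its left context.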
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
+ lm_logits = self.bias_layer(lm_logits)
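+        # Editor's note: the LM head reuses the shared input-embedding matrix (weight tying);
+        # `transpose_b=True` turns the (vocab_size, d_model) embedding table into a
+        # d_model -> vocab_size projection, and the frozen final-logits bias is added on top.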
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+ return TFSeq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+            past_key_values=outputs.past_key_values,  # index 1 of decoder outputs
+            decoder_hidden_states=outputs.decoder_hidden_states,  # index 2 of decoder outputs
+            decoder_attentions=outputs.decoder_attentions,  # index 3 of decoder outputs
+            cross_attentions=outputs.cross_attentions,  # index 4 of decoder outputs
+            encoder_last_hidden_state=outputs.encoder_last_hidden_state,  # index 0 of encoder outputs
+            encoder_hidden_states=outputs.encoder_hidden_states,  # index 1 of encoder outputs
+            encoder_attentions=outputs.encoder_attentions,  # index 2 of encoder outputs
+ )
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqLMOutput(
+ logits=output.logits,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ decoder_attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ if decoder_attention_mask is not None: # xla
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
+ elif past_key_values is not None: # no xla + past_key_values
+ decoder_position_ids = past_key_values[0][0].shape[2]
+ else: # no xla + no past_key_values
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
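+        # Editor's note (illustrative): in the cached, non-XLA branch the single token kept
+        # above must sit right after the cache, e.g. a cache of shape (..., 4, ...) gives
+        # decoder_position_ids == 4 for the one new token.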
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "decoder_attention_mask": decoder_attention_mask,
+ "decoder_position_ids": decoder_position_ids,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+ if getattr(self, "bias_layer", None) is not None:
+ with tf.name_scope(self.bias_layer.name):
+ self.bias_layer.build(None)
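+
+
+# Editor's usage sketch (illustrative, not part of the module): generating a reply with the
+# conditional-generation head above, using the checkpoint named in the configuration docs.
+#
+#     >>> from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration
+#
+#     >>> tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
+#     >>> model = TFBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
+#     >>> inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="tf")
+#     >>> reply_ids = model.generate(**inputs)
+#     >>> tokenizer.batch_decode(reply_ids, skip_special_tokens=True)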
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..28b9a34290c8264e37ddd3a20e1c6c15e28bcd5c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__init__.py
@@ -0,0 +1,134 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
+ "convert_funnel_original_tf_checkpoint_to_pytorch": [],
+ "tokenization_funnel": ["FunnelTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_funnel"] = [
+ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "FunnelBaseModel",
+ "FunnelForMaskedLM",
+ "FunnelForMultipleChoice",
+ "FunnelForPreTraining",
+ "FunnelForQuestionAnswering",
+ "FunnelForSequenceClassification",
+ "FunnelForTokenClassification",
+ "FunnelModel",
+ "FunnelPreTrainedModel",
+ "load_tf_weights_in_funnel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_funnel"] = [
+ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFFunnelBaseModel",
+ "TFFunnelForMaskedLM",
+ "TFFunnelForMultipleChoice",
+ "TFFunnelForPreTraining",
+ "TFFunnelForQuestionAnswering",
+ "TFFunnelForSequenceClassification",
+ "TFFunnelForTokenClassification",
+ "TFFunnelModel",
+ "TFFunnelPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
+ from .tokenization_funnel import FunnelTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_funnel_fast import FunnelTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_funnel import (
+ FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
+ FunnelBaseModel,
+ FunnelForMaskedLM,
+ FunnelForMultipleChoice,
+ FunnelForPreTraining,
+ FunnelForQuestionAnswering,
+ FunnelForSequenceClassification,
+ FunnelForTokenClassification,
+ FunnelModel,
+ FunnelPreTrainedModel,
+ load_tf_weights_in_funnel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_funnel import (
+ TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFFunnelBaseModel,
+ TFFunnelForMaskedLM,
+ TFFunnelForMultipleChoice,
+ TFFunnelForPreTraining,
+ TFFunnelForQuestionAnswering,
+ TFFunnelForSequenceClassification,
+ TFFunnelForTokenClassification,
+ TFFunnelModel,
+ TFFunnelPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
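+
+# Editor's note (illustrative): with the `_LazyModule` indirection above, an import such as
+# `from transformers.models.funnel import FunnelConfig` only loads `configuration_funnel` on
+# first access, so the heavy torch/TF modeling files are not imported until they are needed.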
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/configuration_funnel.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/configuration_funnel.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b49c22fb4c345fa2e997c5bd5eaa865c680068f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/configuration_funnel.py
@@ -0,0 +1,166 @@
+# coding=utf-8
+# Copyright 2020, Hugging Face
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Funnel Transformer model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class FunnelConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`FunnelModel`] or a [`TFFunnelModel`]. It is
+    used to instantiate a Funnel Transformer model according to the specified arguments, defining the model
+    architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Funnel
+ Transformer [funnel-transformer/small](https://huggingface.co/funnel-transformer/small) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+            Vocabulary size of the Funnel transformer. Defines the number of different tokens that can be represented
+            by the `input_ids` passed when calling [`FunnelModel`] or [`TFFunnelModel`].
+ block_sizes (`List[int]`, *optional*, defaults to `[4, 4, 4]`):
+ The sizes of the blocks used in the model.
+ block_repeats (`List[int]`, *optional*):
+ If passed along, each layer of each block is repeated the number of times indicated.
+ num_decoder_layers (`int`, *optional*, defaults to 2):
+ The number of layers in the decoder (when not using the base model).
+ d_model (`int`, *optional*, defaults to 768):
+ Dimensionality of the model's hidden states.
+ n_head (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ d_head (`int`, *optional*, defaults to 64):
+ Dimensionality of the model's heads.
+ d_inner (`int`, *optional*, defaults to 3072):
+ Inner dimension in the feed-forward blocks.
+ hidden_act (`str` or `callable`, *optional*, defaults to `"gelu_new"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability used between the two layers of the feed-forward blocks.
+ initializer_range (`float`, *optional*, defaults to 0.1):
+ The upper bound of the *uniform initializer* for initializing all weight matrices in attention layers.
+ initializer_std (`float`, *optional*):
+ The standard deviation of the *normal initializer* for initializing the embedding matrix and the weight of
+ linear layers. Will default to 1 for the embedding matrix and the value given by Xavier initialization for
+ linear layers.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-09):
+ The epsilon used by the layer normalization layers.
+ pooling_type (`str`, *optional*, defaults to `"mean"`):
+ Possible values are `"mean"` or `"max"`. The way pooling is performed at the beginning of each block.
+ attention_type (`str`, *optional*, defaults to `"relative_shift"`):
+ Possible values are `"relative_shift"` or `"factorized"`. The former is faster on CPU/GPU while the latter
+ is faster on TPU.
+ separate_cls (`bool`, *optional*, defaults to `True`):
+ Whether or not to separate the cls token when applying pooling.
+ truncate_seq (`bool`, *optional*, defaults to `True`):
+ When using `separate_cls`, whether or not to truncate the last token when pooling, to avoid getting a
+ sequence length that is not a multiple of 2.
+ pool_q_only (`bool`, *optional*, defaults to `True`):
+ Whether or not to apply the pooling only to the query or to query, key and values for the attention layers.
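+
+    Example:
+
+    ```python
+    >>> from transformers import FunnelConfig, FunnelModel
+
+    >>> # Initializing a Funnel Transformer funnel-transformer/small style configuration
+    >>> configuration = FunnelConfig()
+
+    >>> # Initializing a model (with random weights) from that configuration
+    >>> model = FunnelModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```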
+ """
+
+ model_type = "funnel"
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "n_head",
+ }
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ block_sizes=[4, 4, 4],
+ block_repeats=None,
+ num_decoder_layers=2,
+ d_model=768,
+ n_head=12,
+ d_head=64,
+ d_inner=3072,
+ hidden_act="gelu_new",
+ hidden_dropout=0.1,
+ attention_dropout=0.1,
+ activation_dropout=0.0,
+ initializer_range=0.1,
+ initializer_std=None,
+ layer_norm_eps=1e-9,
+ pooling_type="mean",
+ attention_type="relative_shift",
+ separate_cls=True,
+ truncate_seq=True,
+ pool_q_only=True,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.block_sizes = block_sizes
+ self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
+ assert len(block_sizes) == len(
+ self.block_repeats
+ ), "`block_sizes` and `block_repeats` should have the same length."
+ self.num_decoder_layers = num_decoder_layers
+ self.d_model = d_model
+ self.n_head = n_head
+ self.d_head = d_head
+ self.d_inner = d_inner
+ self.hidden_act = hidden_act
+ self.hidden_dropout = hidden_dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.initializer_range = initializer_range
+ self.initializer_std = initializer_std
+ self.layer_norm_eps = layer_norm_eps
+ assert pooling_type in [
+ "mean",
+ "max",
+ ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
+ self.pooling_type = pooling_type
+ assert attention_type in [
+ "relative_shift",
+ "factorized",
+ ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
+ self.attention_type = attention_type
+ self.separate_cls = separate_cls
+ self.truncate_seq = truncate_seq
+ self.pool_q_only = pool_q_only
+
+ super().__init__(**kwargs)
+
+ @property
+ def num_hidden_layers(self):
+ return sum(self.block_sizes)
+
+ @num_hidden_layers.setter
+ def num_hidden_layers(self, value):
+ raise NotImplementedError(
+ "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
+ )
+
+ @property
+ def num_blocks(self):
+ return len(self.block_sizes)
+
+ @num_blocks.setter
+ def num_blocks(self, value):
+ raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..848101f083582bafa26e58c87aaa612502f3f79c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,65 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Funnel checkpoint."""
+
+
+import argparse
+
+import torch
+
+from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
+ # Initialise PyTorch model
+ config = FunnelConfig.from_json_file(config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ model = FunnelBaseModel(config) if base_model else FunnelModel(config)
+
+ # Load weights from tf checkpoint
+ load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
+
+ # Save pytorch-model
+ print(f"Save PyTorch model to {pytorch_dump_path}")
+ torch.save(model.state_dict(), pytorch_dump_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
+ )
+ parser.add_argument(
+ "--config_file",
+ default=None,
+ type=str,
+ required=True,
+ help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
+ )
+ args = parser.parse_args()
+ convert_tf_checkpoint_to_pytorch(
+ args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
+ )
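+
+# Example invocation (illustrative; all paths are placeholders):
+#
+#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
+#       --tf_checkpoint_path ./funnel/model.ckpt \
+#       --config_file ./funnel/config.json \
+#       --pytorch_dump_path ./funnel/pytorch_model.bin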
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_funnel.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_funnel.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce0c7789487d8fa7f376f383a40958f9aeb3fb37
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_funnel.py
@@ -0,0 +1,1599 @@
+# coding=utf-8
+# Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Funnel Transformer model."""
+
+import os
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_funnel import FunnelConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "FunnelConfig"
+_CHECKPOINT_FOR_DOC = "funnel-transformer/small"
+
+
+from ..deprecated._archive_maps import FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+INF = 1e6
+
+
+def load_tf_weights_in_funnel(model, config, tf_checkpoint_path):
+ """Load tf checkpoints in a pytorch model."""
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array)
+
+ _layer_map = {
+ "k": "k_head",
+ "q": "q_head",
+ "v": "v_head",
+ "o": "post_proj",
+ "layer_1": "linear_1",
+ "layer_2": "linear_2",
+ "rel_attn": "attention",
+ "ff": "ffn",
+ "kernel": "weight",
+ "gamma": "weight",
+ "beta": "bias",
+ "lookup_table": "weight",
+ "word_embedding": "word_embeddings",
+ "input": "embeddings",
+ }
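+
+    # Editor's note (illustrative): with the default `block_sizes=[4, 4, 4]`, a TF variable such
+    # as "model/encoder/layer_5/rel_attn/q/kernel" resolves, via the map above and the loop
+    # below, to `model.encoder.blocks[1][1].attention.q_head.weight`.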
+
+ for name, array in zip(names, arrays):
+ name = name.split("/")
+        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+        # which are not required for using the pretrained model
+ if any(
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+ for n in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if name[0] == "generator":
+ continue
+ pointer = model
+ skipped = False
+ for m_name in name[1:]:
+ if not isinstance(pointer, FunnelPositionwiseFFN) and re.fullmatch(r"layer_\d+", m_name):
+ layer_index = int(re.search(r"layer_(\d+)", m_name).groups()[0])
+ if layer_index < config.num_hidden_layers:
+ block_idx = 0
+ while layer_index >= config.block_sizes[block_idx]:
+ layer_index -= config.block_sizes[block_idx]
+ block_idx += 1
+ pointer = pointer.blocks[block_idx][layer_index]
+ else:
+ layer_index -= config.num_hidden_layers
+ pointer = pointer.layers[layer_index]
+ elif m_name == "r" and isinstance(pointer, FunnelRelMultiheadAttention):
+ pointer = pointer.r_kernel
+ break
+ elif m_name in _layer_map:
+ pointer = getattr(pointer, _layer_map[m_name])
+ else:
+ try:
+ pointer = getattr(pointer, m_name)
+ except AttributeError:
+ print(f"Skipping {'/'.join(name)}", array.shape)
+ skipped = True
+ break
+ if not skipped:
+ if len(pointer.shape) != len(array.shape):
+ array = array.reshape(pointer.shape)
+ if m_name == "kernel":
+ array = np.transpose(array)
+ pointer.data = torch.from_numpy(array)
+
+ return model
+
+
+class FunnelEmbeddings(nn.Module):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+
+ def forward(
+ self, input_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None
+ ) -> torch.Tensor:
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ embeddings = self.layer_norm(inputs_embeds)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class FunnelAttentionStructure(nn.Module):
+ """
+    Contains helpers for `FunnelRelMultiheadAttention`.
+ """
+
+ cls_token_type_id: int = 2
+
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.sin_dropout = nn.Dropout(config.hidden_dropout)
+ self.cos_dropout = nn.Dropout(config.hidden_dropout)
+ # Track where we are at in terms of pooling from the original input, e.g., by how much the sequence length was
+ # divided.
+ self.pooling_mult = None
+
+ def init_attention_inputs(
+ self,
+ inputs_embeds: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ ) -> Tuple[torch.Tensor]:
+ """Returns the attention inputs associated to the inputs of the model."""
+ # inputs_embeds has shape batch_size x seq_len x d_model
+ # attention_mask and token_type_ids have shape batch_size x seq_len
+ self.pooling_mult = 1
+ self.seq_len = seq_len = inputs_embeds.size(1)
+ position_embeds = self.get_position_embeds(seq_len, inputs_embeds.dtype, inputs_embeds.device)
+ token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None
+ cls_mask = (
+ nn.functional.pad(inputs_embeds.new_ones([seq_len - 1, seq_len - 1]), (1, 0, 1, 0))
+ if self.config.separate_cls
+ else None
+ )
+ return (position_embeds, token_type_mat, attention_mask, cls_mask)
+
+ def token_type_ids_to_mat(self, token_type_ids: torch.Tensor) -> torch.Tensor:
+ """Convert `token_type_ids` to `token_type_mat`."""
+ token_type_mat = token_type_ids[:, :, None] == token_type_ids[:, None]
+ # Treat as in the same segment as both A & B
+ cls_ids = token_type_ids == self.cls_token_type_id
+ cls_mat = cls_ids[:, :, None] | cls_ids[:, None]
+ return cls_mat | token_type_mat
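+
+    # Editor's sketch (illustrative; `structure` names a hypothetical FunnelAttentionStructure
+    # instance): for token_type_ids [[2, 0, 0, 1]], i.e. a <cls> token followed by two
+    # segment-A tokens and one segment-B token, the <cls> row is all True, so <cls> is
+    # treated as belonging to both segments:
+    #
+    #     >>> mat = structure.token_type_ids_to_mat(torch.tensor([[2, 0, 0, 1]]))
+    #     >>> mat[0, 0]
+    #     tensor([True, True, True, True])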
+
+ def get_position_embeds(
+ self, seq_len: int, dtype: torch.dtype, device: torch.device
+ ) -> Union[Tuple[torch.Tensor], List[List[torch.Tensor]]]:
+ """
+ Create and cache inputs related to relative position encoding. Those are very different depending on whether we
+ are using the factorized or the relative shift attention:
+
+ For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2,
+ final formula.
+
+ For the relative shift attention, it returns all possible vectors R used in the paper, appendix A.2.1, final
+ formula.
+
+ Paper link: https://arxiv.org/abs/2006.03236
+ """
+ d_model = self.config.d_model
+ if self.config.attention_type == "factorized":
+            # Notations from the paper, appendix A.2.2, final formula.
+ # We need to create and return the matrices phi, psi, pi and omega.
+ pos_seq = torch.arange(0, seq_len, 1.0, dtype=torch.int64, device=device).to(dtype)
+ freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype)
+ inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
+ sinusoid = pos_seq[:, None] * inv_freq[None]
+ sin_embed = torch.sin(sinusoid)
+ sin_embed_d = self.sin_dropout(sin_embed)
+ cos_embed = torch.cos(sinusoid)
+ cos_embed_d = self.cos_dropout(cos_embed)
+            # This is different from the formula in the paper...
+ phi = torch.cat([sin_embed_d, sin_embed_d], dim=-1)
+ psi = torch.cat([cos_embed, sin_embed], dim=-1)
+ pi = torch.cat([cos_embed_d, cos_embed_d], dim=-1)
+ omega = torch.cat([-sin_embed, cos_embed], dim=-1)
+ return (phi, pi, psi, omega)
+ else:
+            # Notations from the paper, appendix A.2.1, final formula.
+ # We need to create and return all the possible vectors R for all blocks and shifts.
+ freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype)
+ inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2)))
+ # Maximum relative positions for the first input
+ rel_pos_id = torch.arange(-seq_len * 2, seq_len * 2, 1.0, dtype=torch.int64, device=device).to(dtype)
+ zero_offset = seq_len * 2
+ sinusoid = rel_pos_id[:, None] * inv_freq[None]
+ sin_embed = self.sin_dropout(torch.sin(sinusoid))
+ cos_embed = self.cos_dropout(torch.cos(sinusoid))
+ pos_embed = torch.cat([sin_embed, cos_embed], dim=-1)
+
+ pos = torch.arange(0, seq_len, dtype=torch.int64, device=device).to(dtype)
+ pooled_pos = pos
+ position_embeds_list = []
+ for block_index in range(0, self.config.num_blocks):
+                # For each block with block_index > 0, we need two types of position embeddings:
+ # - Attention(pooled-q, unpooled-kv)
+ # - Attention(pooled-q, pooled-kv)
+ # For block_index = 0 we only need the second one and leave the first one as None.
+
+ # First type
+ if block_index == 0:
+ position_embeds_pooling = None
+ else:
+ pooled_pos = self.stride_pool_pos(pos, block_index)
+
+ # construct rel_pos_id
+ stride = 2 ** (block_index - 1)
+ rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2)
+ rel_pos = rel_pos[:, None] + zero_offset
+ rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
+ position_embeds_pooling = torch.gather(pos_embed, 0, rel_pos)
+
+ # Second type
+ pos = pooled_pos
+ stride = 2**block_index
+ rel_pos = self.relative_pos(pos, stride)
+
+ rel_pos = rel_pos[:, None] + zero_offset
+ rel_pos = rel_pos.expand(rel_pos.size(0), d_model)
+ position_embeds_no_pooling = torch.gather(pos_embed, 0, rel_pos)
+
+ position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling])
+ return position_embeds_list
+
+ def stride_pool_pos(self, pos_id: torch.Tensor, block_index: int):
+ """
+ Pool `pos_id` while keeping the cls token separate (if `config.separate_cls=True`).
+ """
+ if self.config.separate_cls:
+            # Under separate <cls>, we treat the <cls> as the first token in
+            # the previous block of the 1st real block. Since the 1st real
+            # block always has position 1, the position of the previous block
+            # will be at `1 - 2 ** block_index`.
+ cls_pos = pos_id.new_tensor([-(2**block_index) + 1])
+ pooled_pos_id = pos_id[1:-1] if self.config.truncate_seq else pos_id[1:]
+ return torch.cat([cls_pos, pooled_pos_id[::2]], 0)
+ else:
+ return pos_id[::2]
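+
+    # Editor's sketch (illustrative, assuming `separate_cls=True` and `truncate_seq=True`;
+    # `structure` names a hypothetical FunnelAttentionStructure instance):
+    #
+    #     >>> pos = torch.arange(8)  # position 0 is <cls>
+    #     >>> structure.stride_pool_pos(pos, block_index=1)
+    #     tensor([-1,  1,  3,  5])   # <cls> moves to 1 - 2**1 = -1, the rest is strided by 2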
+
+ def relative_pos(self, pos: torch.Tensor, stride: int, pooled_pos=None, shift: int = 1) -> torch.Tensor:
+ """
+ Build the relative positional vector between `pos` and `pooled_pos`.
+ """
+ if pooled_pos is None:
+ pooled_pos = pos
+
+ ref_point = pooled_pos[0] - pos[0]
+ num_remove = shift * len(pooled_pos)
+ max_dist = ref_point + num_remove * stride
+ min_dist = pooled_pos[0] - pos[-1]
+
+ return torch.arange(max_dist, min_dist - 1, -stride, dtype=torch.long, device=pos.device)
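+
+    # Editor's sketch (illustrative): before any pooling, the vector simply enumerates the
+    # possible offsets i - j from largest to smallest:
+    #
+    #     >>> structure.relative_pos(torch.arange(4), stride=1)
+    #     tensor([ 4,  3,  2,  1,  0, -1, -2, -3])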
+
+ def stride_pool(
+ self,
+ tensor: Union[torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor]],
+ axis: Union[int, Tuple[int], List[int]],
+ ) -> torch.Tensor:
+ """
+ Perform pooling by stride slicing the tensor along the given axis.
+ """
+ if tensor is None:
+ return None
+
+ # Do the stride pool recursively if axis is a list or a tuple of ints.
+ if isinstance(axis, (list, tuple)):
+ for ax in axis:
+ tensor = self.stride_pool(tensor, ax)
+ return tensor
+
+ # Do the stride pool recursively if tensor is a list or tuple of tensors.
+ if isinstance(tensor, (tuple, list)):
+ return type(tensor)(self.stride_pool(x, axis) for x in tensor)
+
+ # Deal with negative axis
+ axis %= tensor.ndim
+
+ axis_slice = (
+ slice(None, -1, 2) if self.config.separate_cls and self.config.truncate_seq else slice(None, None, 2)
+ )
+ enc_slice = [slice(None)] * axis + [axis_slice]
+ if self.config.separate_cls:
+ cls_slice = [slice(None)] * axis + [slice(None, 1)]
+ tensor = torch.cat([tensor[cls_slice], tensor], axis=axis)
+ return tensor[enc_slice]
+
+ def pool_tensor(
+ self, tensor: Union[torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor]], mode: str = "mean", stride: int = 2
+ ) -> torch.Tensor:
+ """Apply 1D pooling to a tensor of size [B x T (x H)]."""
+ if tensor is None:
+ return None
+
+ # Do the pool recursively if tensor is a list or tuple of tensors.
+ if isinstance(tensor, (tuple, list)):
+            return type(tensor)(self.pool_tensor(x, mode=mode, stride=stride) for x in tensor)
+
+ if self.config.separate_cls:
+ suffix = tensor[:, :-1] if self.config.truncate_seq else tensor
+ tensor = torch.cat([tensor[:, :1], suffix], dim=1)
+
+ ndim = tensor.ndim
+ if ndim == 2:
+ tensor = tensor[:, None, :, None]
+ elif ndim == 3:
+ tensor = tensor[:, None, :, :]
+ # Stride is applied on the second-to-last dimension.
+ stride = (stride, 1)
+
+ if mode == "mean":
+ tensor = nn.functional.avg_pool2d(tensor, stride, stride=stride, ceil_mode=True)
+ elif mode == "max":
+ tensor = nn.functional.max_pool2d(tensor, stride, stride=stride, ceil_mode=True)
+ elif mode == "min":
+ tensor = -nn.functional.max_pool2d(-tensor, stride, stride=stride, ceil_mode=True)
+ else:
+ raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.")
+
+ if ndim == 2:
+ return tensor[:, 0, :, 0]
+ elif ndim == 3:
+ return tensor[:, 0]
+ return tensor
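+
+    # Editor's sketch (illustrative, assuming `separate_cls=True` and `truncate_seq=True`):
+    #
+    #     >>> t = torch.arange(8.0).view(1, 8)       # one "sequence" of 8 scalars
+    #     >>> structure.pool_tensor(t, mode="mean")  # <cls> kept, the rest mean-pooled in pairs
+    #     tensor([[0.0000, 1.5000, 3.5000, 5.5000]])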
+
+ def pre_attention_pooling(
+ self, output, attention_inputs: Tuple[torch.Tensor]
+ ) -> Tuple[torch.Tensor, Tuple[torch.Tensor]]:
+ """Pool `output` and the proper parts of `attention_inputs` before the attention layer."""
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
+ if self.config.pool_q_only:
+ if self.config.attention_type == "factorized":
+ position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:]
+ token_type_mat = self.stride_pool(token_type_mat, 1)
+ cls_mask = self.stride_pool(cls_mask, 0)
+ output = self.pool_tensor(output, mode=self.config.pooling_type)
+ else:
+ self.pooling_mult *= 2
+ if self.config.attention_type == "factorized":
+ position_embeds = self.stride_pool(position_embeds, 0)
+ token_type_mat = self.stride_pool(token_type_mat, [1, 2])
+ cls_mask = self.stride_pool(cls_mask, [1, 2])
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
+ output = self.pool_tensor(output, mode=self.config.pooling_type)
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
+ return output, attention_inputs
+
+ def post_attention_pooling(self, attention_inputs: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]:
+ """Pool the proper parts of `attention_inputs` after the attention layer."""
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
+ if self.config.pool_q_only:
+ self.pooling_mult *= 2
+ if self.config.attention_type == "factorized":
+ position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0)
+ token_type_mat = self.stride_pool(token_type_mat, 2)
+ cls_mask = self.stride_pool(cls_mask, 1)
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
+ return attention_inputs
+
+
+def _relative_shift_gather(positional_attn: torch.Tensor, context_len: int, shift: int) -> torch.Tensor:
+ batch_size, n_head, seq_len, max_rel_len = positional_attn.shape
+    # max_rel_len = 2 * context_len + shift - 1 is the number of possible relative positions i-j
+
+ # What's next is the same as doing the following gather, which might be clearer code but less efficient.
+ # idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1)
+ # # matrix of context_len + i-j
+ # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len]))
+
+ positional_attn = torch.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len])
+ positional_attn = positional_attn[:, :, shift:, :]
+ positional_attn = torch.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift])
+ positional_attn = positional_attn[..., :context_len]
+ return positional_attn
+
+
+class FunnelRelMultiheadAttention(nn.Module):
+ def __init__(self, config: FunnelConfig, block_index: int) -> None:
+ super().__init__()
+ self.config = config
+ self.block_index = block_index
+ d_model, n_head, d_head = config.d_model, config.n_head, config.d_head
+
+ self.hidden_dropout = nn.Dropout(config.hidden_dropout)
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
+
+ self.q_head = nn.Linear(d_model, n_head * d_head, bias=False)
+ self.k_head = nn.Linear(d_model, n_head * d_head)
+ self.v_head = nn.Linear(d_model, n_head * d_head)
+
+ self.r_w_bias = nn.Parameter(torch.zeros([n_head, d_head]))
+ self.r_r_bias = nn.Parameter(torch.zeros([n_head, d_head]))
+ self.r_kernel = nn.Parameter(torch.zeros([d_model, n_head, d_head]))
+ self.r_s_bias = nn.Parameter(torch.zeros([n_head, d_head]))
+ self.seg_embed = nn.Parameter(torch.zeros([2, n_head, d_head]))
+
+ self.post_proj = nn.Linear(n_head * d_head, d_model)
+ self.layer_norm = nn.LayerNorm(d_model, eps=config.layer_norm_eps)
+ self.scale = 1.0 / (d_head**0.5)
+
+ def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None):
+ """Relative attention score for the positional encodings"""
+        # q_head has shape batch_size x seq_len x n_head x d_head
+ if self.config.attention_type == "factorized":
+            # Notations from the paper, appendix A.2.2, final formula (https://arxiv.org/abs/2006.03236)
+ # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model
+ phi, pi, psi, omega = position_embeds
+ # Shape n_head x d_head
+ u = self.r_r_bias * self.scale
+ # Shape d_model x n_head x d_head
+ w_r = self.r_kernel
+
+            # Shape batch_size x seq_len x n_head x d_model
+ q_r_attention = torch.einsum("binh,dnh->bind", q_head + u, w_r)
+ q_r_attention_1 = q_r_attention * phi[:, None]
+ q_r_attention_2 = q_r_attention * pi[:, None]
+
+ # Shape batch_size x n_head x seq_len x context_len
+ positional_attn = torch.einsum("bind,jd->bnij", q_r_attention_1, psi) + torch.einsum(
+ "bind,jd->bnij", q_r_attention_2, omega
+ )
+ else:
+ shift = 2 if q_head.shape[1] != context_len else 1
+            # Notations from the paper, appendix A.2.1, final formula (https://arxiv.org/abs/2006.03236)
+ # Grab the proper positional encoding, shape max_rel_len x d_model
+ r = position_embeds[self.block_index][shift - 1]
+ # Shape n_head x d_head
+ v = self.r_r_bias * self.scale
+ # Shape d_model x n_head x d_head
+ w_r = self.r_kernel
+
+ # Shape max_rel_len x n_head x d_model
+ r_head = torch.einsum("td,dnh->tnh", r, w_r)
+ # Shape batch_size x n_head x seq_len x max_rel_len
+ positional_attn = torch.einsum("binh,tnh->bnit", q_head + v, r_head)
+ # Shape batch_size x n_head x seq_len x context_len
+ positional_attn = _relative_shift_gather(positional_attn, context_len, shift)
+
+ if cls_mask is not None:
+ positional_attn *= cls_mask
+ return positional_attn
+
+ def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None):
+ """Relative attention score for the token_type_ids"""
+ if token_type_mat is None:
+ return 0
+ batch_size, seq_len, context_len = token_type_mat.shape
+ # q_head has shape batch_size x seq_len x n_head x d_head
+ # Shape n_head x d_head
+ r_s_bias = self.r_s_bias * self.scale
+
+ # Shape batch_size x n_head x seq_len x 2
+ token_type_bias = torch.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed)
+ # Shape batch_size x n_head x seq_len x context_len
+ token_type_mat = token_type_mat[:, None].expand([batch_size, q_head.shape[2], seq_len, context_len])
+ # Shapes batch_size x n_head x seq_len x 1
+ diff_token_type, same_token_type = torch.split(token_type_bias, 1, dim=-1)
+ # Shape batch_size x n_head x seq_len x context_len
+ token_type_attn = torch.where(
+ token_type_mat, same_token_type.expand(token_type_mat.shape), diff_token_type.expand(token_type_mat.shape)
+ )
+
+ if cls_mask is not None:
+ token_type_attn *= cls_mask
+ return token_type_attn
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ attention_inputs: Tuple[torch.Tensor],
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ # query has shape batch_size x seq_len x d_model
+ # key and value have shapes batch_size x context_len x d_model
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
+
+ batch_size, seq_len, _ = query.shape
+ context_len = key.shape[1]
+ n_head, d_head = self.config.n_head, self.config.d_head
+
+ # Shape batch_size x seq_len x n_head x d_head
+ q_head = self.q_head(query).view(batch_size, seq_len, n_head, d_head)
+ # Shapes batch_size x context_len x n_head x d_head
+ k_head = self.k_head(key).view(batch_size, context_len, n_head, d_head)
+ v_head = self.v_head(value).view(batch_size, context_len, n_head, d_head)
+
+ q_head = q_head * self.scale
+ # Shape n_head x d_head
+ r_w_bias = self.r_w_bias * self.scale
+ # Shapes batch_size x n_head x seq_len x context_len
+ content_score = torch.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head)
+ positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask)
+ token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask)
+
+ # merge attention scores
+ attn_score = content_score + positional_attn + token_type_attn
+
+ # precision safe in case of mixed precision training
+ dtype = attn_score.dtype
+ attn_score = attn_score.float()
+ # perform masking
+ if attention_mask is not None:
+ attn_score = attn_score - INF * (1 - attention_mask[:, None, None].float())
+ # attention probability
+ attn_prob = torch.softmax(attn_score, dim=-1, dtype=dtype)
+ attn_prob = self.attention_dropout(attn_prob)
+
+ # attention output, shape batch_size x seq_len x n_head x d_head
+ attn_vec = torch.einsum("bnij,bjnd->bind", attn_prob, v_head)
+
+ # Shape batch_size x seq_len x d_model
+ attn_out = self.post_proj(attn_vec.reshape(batch_size, seq_len, n_head * d_head))
+ attn_out = self.hidden_dropout(attn_out)
+
+ output = self.layer_norm(query + attn_out)
+ return (output, attn_prob) if output_attentions else (output,)
+
+
+class FunnelPositionwiseFFN(nn.Module):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__()
+ self.linear_1 = nn.Linear(config.d_model, config.d_inner)
+ self.activation_function = ACT2FN[config.hidden_act]
+ self.activation_dropout = nn.Dropout(config.activation_dropout)
+ self.linear_2 = nn.Linear(config.d_inner, config.d_model)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layer_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps)
+
+ def forward(self, hidden: torch.Tensor) -> torch.Tensor:
+ h = self.linear_1(hidden)
+ h = self.activation_function(h)
+ h = self.activation_dropout(h)
+ h = self.linear_2(h)
+ h = self.dropout(h)
+ return self.layer_norm(hidden + h)
+
+
+class FunnelLayer(nn.Module):
+ def __init__(self, config: FunnelConfig, block_index: int) -> None:
+ super().__init__()
+ self.attention = FunnelRelMultiheadAttention(config, block_index)
+ self.ffn = FunnelPositionwiseFFN(config)
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ attention_inputs,
+ output_attentions: bool = False,
+ ) -> Tuple:
+ attn = self.attention(query, key, value, attention_inputs, output_attentions=output_attentions)
+ output = self.ffn(attn[0])
+ return (output, attn[1]) if output_attentions else (output,)
+
+
+class FunnelEncoder(nn.Module):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.attention_structure = FunnelAttentionStructure(config)
+ self.blocks = nn.ModuleList(
+ [
+ nn.ModuleList([FunnelLayer(config, block_index) for _ in range(block_size)])
+ for block_index, block_size in enumerate(config.block_sizes)
+ ]
+ )
+
+ def forward(
+ self,
+ inputs_embeds: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[Tuple, BaseModelOutput]:
+ # The pooling is not implemented on long tensors, so we convert this mask.
+ attention_mask = attention_mask.type_as(inputs_embeds)
+ attention_inputs = self.attention_structure.init_attention_inputs(
+ inputs_embeds,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ )
+ hidden = inputs_embeds
+
+ all_hidden_states = (inputs_embeds,) if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for block_index, block in enumerate(self.blocks):
+ pooling_flag = hidden.size(1) > (2 if self.config.separate_cls else 1)
+ pooling_flag = pooling_flag and block_index > 0
+ if pooling_flag:
+ pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling(
+ hidden, attention_inputs
+ )
+ for layer_index, layer in enumerate(block):
+ for repeat_index in range(self.config.block_repeats[block_index]):
+ do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag
+ if do_pooling:
+ query = pooled_hidden
+ key = value = hidden if self.config.pool_q_only else pooled_hidden
+ else:
+ query = key = value = hidden
+ layer_output = layer(query, key, value, attention_inputs, output_attentions=output_attentions)
+ hidden = layer_output[0]
+ if do_pooling:
+ attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs)
+
+ if output_attentions:
+ all_attentions = all_attentions + layer_output[1:]
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
+
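+# Shape sketch (illustrative, not from the original file): with the default
+# separate_cls=True and, say, block_sizes=[4, 4, 4] and seq_len=128, block 0 runs
+# at length 128; pooling before blocks 1 and 2 gives lengths ceil(129/2) = 65 and
+# ceil(66/2) = 33 (the [CLS] token is kept out of the pooling, hence the odd sizes).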
+
+def upsample(
+ x: torch.Tensor, stride: int, target_len: int, separate_cls: bool = True, truncate_seq: bool = False
+) -> torch.Tensor:
+ """
+ Upsample tensor `x` to match `target_len` by repeating the tokens `stride` times along the sequence length dimension.
+ """
+ if stride == 1:
+ return x
+ if separate_cls:
+ cls = x[:, :1]
+ x = x[:, 1:]
+ output = torch.repeat_interleave(x, repeats=stride, dim=1)
+ if separate_cls:
+ if truncate_seq:
+ output = nn.functional.pad(output, (0, 0, 0, stride - 1, 0, 0))
+ output = output[:, : target_len - 1]
+ output = torch.cat([cls, output], dim=1)
+ else:
+ output = output[:, :target_len]
+ return output
+
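+# Hedged, illustrative example (assumes torch is imported): with separate_cls=True,
+# [CLS] stays in place while the remaining tokens are repeated:
+#
+# >>> x = torch.tensor([[[0.0], [1.0], [2.0]]])  # batch=1, seq_len=3 ([CLS], t1, t2), d=1
+# >>> upsample(x, stride=2, target_len=5).squeeze(-1)
+# tensor([[0., 1., 1., 2., 2.]])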
+
+class FunnelDecoder(nn.Module):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.attention_structure = FunnelAttentionStructure(config)
+ self.layers = nn.ModuleList([FunnelLayer(config, 0) for _ in range(config.num_decoder_layers)])
+
+ def forward(
+ self,
+ final_hidden: torch.Tensor,
+ first_block_hidden: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[Tuple, BaseModelOutput]:
+ upsampled_hidden = upsample(
+ final_hidden,
+ stride=2 ** (len(self.config.block_sizes) - 1),
+ target_len=first_block_hidden.shape[1],
+ separate_cls=self.config.separate_cls,
+ truncate_seq=self.config.truncate_seq,
+ )
+
+ hidden = upsampled_hidden + first_block_hidden
+ all_hidden_states = (hidden,) if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ attention_inputs = self.attention_structure.init_attention_inputs(
+ hidden,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ )
+
+ for layer in self.layers:
+ layer_output = layer(hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions)
+ hidden = layer_output[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + layer_output[1:]
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
+
+
+class FunnelDiscriminatorPredictions(nn.Module):
+ """Prediction module for the discriminator, made up of two dense layers."""
+
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.dense = nn.Linear(config.d_model, config.d_model)
+ self.dense_prediction = nn.Linear(config.d_model, 1)
+
+ def forward(self, discriminator_hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(discriminator_hidden_states)
+ hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
+ logits = self.dense_prediction(hidden_states).squeeze(-1)
+ return logits
+
+
+class FunnelPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = FunnelConfig
+ load_tf_weights = load_tf_weights_in_funnel
+ base_model_prefix = "funnel"
+
+ def _init_weights(self, module):
+ classname = module.__class__.__name__
+ if classname.find("Linear") != -1:
+ if getattr(module, "weight", None) is not None:
+ if self.config.initializer_std is None:
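+ # Glorot-style scaling (up to a constant factor) when no explicit std is configured.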
+ fan_out, fan_in = module.weight.shape
+ std = np.sqrt(1.0 / float(fan_in + fan_out))
+ else:
+ std = self.config.initializer_std
+ nn.init.normal_(module.weight, std=std)
+ if getattr(module, "bias", None) is not None:
+ nn.init.constant_(module.bias, 0.0)
+ elif classname == "FunnelRelMultiheadAttention":
+ nn.init.uniform_(module.r_w_bias, b=self.config.initializer_range)
+ nn.init.uniform_(module.r_r_bias, b=self.config.initializer_range)
+ nn.init.uniform_(module.r_kernel, b=self.config.initializer_range)
+ nn.init.uniform_(module.r_s_bias, b=self.config.initializer_range)
+ nn.init.uniform_(module.seg_embed, b=self.config.initializer_range)
+ elif classname == "FunnelEmbeddings":
+ std = 1.0 if self.config.initializer_std is None else self.config.initializer_std
+ nn.init.normal_(module.word_embeddings.weight, std=std)
+ if module.word_embeddings.padding_idx is not None:
+ module.word_embeddings.weight.data[module.word_embeddings.padding_idx].zero_()
+
+
+class FunnelClassificationHead(nn.Module):
+ def __init__(self, config: FunnelConfig, n_labels: int) -> None:
+ super().__init__()
+ self.linear_hidden = nn.Linear(config.d_model, config.d_model)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.linear_out = nn.Linear(config.d_model, n_labels)
+
+ def forward(self, hidden: torch.Tensor) -> torch.Tensor:
+ hidden = self.linear_hidden(hidden)
+ hidden = torch.tanh(hidden)
+ hidden = self.dropout(hidden)
+ return self.linear_out(hidden)
+
+
+@dataclass
+class FunnelForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`FunnelForPreTraining`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total loss of the ELECTRA-style objective.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Prediction scores of the head (scores for each token before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+FUNNEL_START_DOCSTRING = r"""
+
+ The Funnel Transformer model was proposed in [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient
+ Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`FunnelConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+FUNNEL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ """
+ The base Funnel Transformer Model outputting raw hidden-states without the upsampling head (also called the
+ decoder) or any task-specific head on top.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class FunnelBaseModel(FunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__(config)
+
+ self.embeddings = FunnelEmbeddings(config)
+ self.encoder = FunnelEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
+ self.embeddings.word_embeddings = new_embeddings
+
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small-base",
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # TODO: deal with head_mask
+ if inputs_embeds is None:
+ inputs_embeds = self.embeddings(input_ids)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return encoder_outputs
+
+
+@add_start_docstrings(
+ "The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.",
+ FUNNEL_START_DOCSTRING,
+)
+class FunnelModel(FunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__(config)
+ self.config = config
+ self.embeddings = FunnelEmbeddings(config)
+ self.encoder = FunnelEncoder(config)
+ self.decoder = FunnelDecoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
+ self.embeddings.word_embeddings = new_embeddings
+
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # TODO: deal with head_mask
+ if inputs_embeds is None:
+ inputs_embeds = self.embeddings(input_ids)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=True,
+ return_dict=return_dict,
+ )
+
+ decoder_outputs = self.decoder(
+ final_hidden=encoder_outputs[0],
+ first_block_hidden=encoder_outputs[1][self.config.block_sizes[0]],
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ idx = 0
+ outputs = (decoder_outputs[0],)
+ if output_hidden_states:
+ idx += 1
+ outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],)
+ if output_attentions:
+ idx += 1
+ outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],)
+ return outputs
+
+ return BaseModelOutput(
+ last_hidden_state=decoder_outputs[0],
+ hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states)
+ if output_hidden_states
+ else None,
+ attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None,
+ )
+
+
+@add_start_docstrings(
+ """
+ Funnel Transformer model with a binary classification head on top as used during pretraining for identifying
+ generated tokens.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class FunnelForPreTraining(FunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__(config)
+
+ self.funnel = FunnelModel(config)
+ self.discriminator_predictions = FunnelDiscriminatorPredictions(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=FunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, FunnelForPreTrainingOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the ELECTRA-style loss. Input should be a sequence of tokens (see `input_ids`
+ docstring) Indices should be in `[0, 1]`:
+
+ - 0 indicates the token is an original token,
+ - 1 indicates the token was replaced.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FunnelForPreTraining
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
+ >>> model = FunnelForPreTraining.from_pretrained("funnel-transformer/small")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> logits = model(**inputs).logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ discriminator_hidden_states = self.funnel(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ discriminator_sequence_output = discriminator_hidden_states[0]
+
+ logits = self.discriminator_predictions(discriminator_sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = nn.BCEWithLogitsLoss()
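+ # ELECTRA-style replaced-token detection: when a mask is given, score only the non-padding positions.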
+ if attention_mask is not None:
+ active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
+ active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
+ active_labels = labels[active_loss]
+ loss = loss_fct(active_logits, active_labels.float())
+ else:
+ loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())
+
+ if not return_dict:
+ output = (logits,) + discriminator_hidden_states[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return FunnelForPreTrainingOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=discriminator_hidden_states.hidden_states,
+ attentions=discriminator_hidden_states.attentions,
+ )
+
+
+@add_start_docstrings("""Funnel Transformer Model with a `language modeling` head on top.""", FUNNEL_START_DOCSTRING)
+class FunnelForMaskedLM(FunnelPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__(config)
+
+ self.funnel = FunnelModel(config)
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self) -> nn.Linear:
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ mask="",
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.funnel(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = outputs[0]
+ prediction_logits = self.lm_head(last_hidden_state)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
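+# Hedged usage sketch for FunnelForMaskedLM above (illustrative; the checkpoint
+# name and mask_token handling are assumptions, mirroring the doc examples
+# elsewhere in this file):
+#
+# >>> from transformers import AutoTokenizer, FunnelForMaskedLM
+# >>> tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
+# >>> model = FunnelForMaskedLM.from_pretrained("funnel-transformer/small")
+# >>> inputs = tokenizer(f"Paris is the capital of {tokenizer.mask_token}.", return_tensors="pt")
+# >>> logits = model(**inputs).logits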
+
+@add_start_docstrings(
+ """
+ Funnel Transformer Model with a sequence classification/regression head on top (two linear layers on top of the
+ first timestep of the last hidden state) e.g. for GLUE tasks.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class FunnelForSequenceClassification(FunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.funnel = FunnelBaseModel(config)
+ self.classifier = FunnelClassificationHead(config, config.num_labels)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small-base",
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.funnel(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = outputs[0]
+ pooled_output = last_hidden_state[:, 0]
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Funnel Transformer Model with a multiple choice classification head on top (two linear layers on top of the first
+ timestep of the last hidden state, and a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class FunnelForMultipleChoice(FunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__(config)
+
+ self.funnel = FunnelBaseModel(config)
+ self.classifier = FunnelClassificationHead(config, 1)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small-base",
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
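+ # All inputs are now flattened to (batch_size * num_choices, seq_len[, hidden]);
+ # the classifier below yields one logit per flattened example, reshaped back to
+ # (batch_size, num_choices) for the cross-entropy over choices.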
+ outputs = self.funnel(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = outputs[0]
+ pooled_output = last_hidden_state[:, 0]
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Funnel Transformer Model with a token classification head on top (a linear layer on top of the hidden-states
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class FunnelForTokenClassification(FunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.funnel = FunnelModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.funnel(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = outputs[0]
+ last_hidden_state = self.dropout(last_hidden_state)
+ logits = self.classifier(last_hidden_state)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Funnel Transformer Model with a span classification head on top for extractive question-answering tasks like SQuAD
+ (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class FunnelForQuestionAnswering(FunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig) -> None:
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.funnel = FunnelModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.funnel(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = outputs[0]
+
+ logits = self.qa_outputs(last_hidden_state)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, splitting adds a dimension; squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_tf_funnel.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_tf_funnel.py
new file mode 100644
index 0000000000000000000000000000000000000000..b50b96df1c54083d552e94f8d0c5e219a62d3e65
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/modeling_tf_funnel.py
@@ -0,0 +1,1871 @@
+# coding=utf-8
+# Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 Funnel model."""
+
+
+from __future__ import annotations
+
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_funnel import FunnelConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "FunnelConfig"
+
+
+from ..deprecated._archive_maps import TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+INF = 1e6
+
+
+class TFFunnelEmbeddings(keras.layers.Layer):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.initializer_std = 1.0 if config.initializer_std is None else config.initializer_std
+
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.hidden_size],
+ initializer=get_initializer(initializer_range=self.initializer_std),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.d_model])
+
+ def call(self, input_ids=None, inputs_embeds=None, training=False):
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+ assert not (input_ids is not None and inputs_embeds is not None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(self.weight, input_ids)
+
+ final_embeddings = self.LayerNorm(inputs=inputs_embeds)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+class TFFunnelAttentionStructure:
+ """
+ Contains helpers for `TFFunnelRelMultiheadAttention`.
+ """
+
+ cls_token_type_id: int = 2
+
+ def __init__(self, config):
+ self.d_model = config.d_model
+ self.attention_type = config.attention_type
+ self.num_blocks = config.num_blocks
+ self.separate_cls = config.separate_cls
+ self.truncate_seq = config.truncate_seq
+ self.pool_q_only = config.pool_q_only
+ self.pooling_type = config.pooling_type
+
+ self.sin_dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.cos_dropout = keras.layers.Dropout(config.hidden_dropout)
+ # Track where we are in terms of pooling from the original input, e.g., by how much the sequence length was
+ # divided.
+ self.pooling_mult = None
+
+ def init_attention_inputs(self, inputs_embeds, attention_mask=None, token_type_ids=None, training=False):
+ """Returns the attention inputs associated to the inputs of the model."""
+ # inputs_embeds has shape batch_size x seq_len x d_model
+ # attention_mask and token_type_ids have shape batch_size x seq_len
+ self.pooling_mult = 1
+ self.seq_len = seq_len = shape_list(inputs_embeds)[1]
+ position_embeds = self.get_position_embeds(seq_len, training=training)
+ token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None
+ cls_mask = (
+ tf.pad(tf.ones([seq_len - 1, seq_len - 1], dtype=inputs_embeds.dtype), [[1, 0], [1, 0]])
+ if self.separate_cls
+ else None
+ )
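+ # cls_mask is 0 along the first row and column (positions attending to or from
+ # [CLS]) and 1 elsewhere; it zeroes positional/segment attention terms that
+ # involve the [CLS] token.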
+ return (position_embeds, token_type_mat, attention_mask, cls_mask)
+
+ def token_type_ids_to_mat(self, token_type_ids):
+ """Convert `token_type_ids` to `token_type_mat`."""
+ token_type_mat = tf.equal(tf.expand_dims(token_type_ids, -1), tf.expand_dims(token_type_ids, -2))
+ # Treat <cls> as in the same segment as both A & B
+ cls_ids = tf.equal(token_type_ids, tf.constant([self.cls_token_type_id], dtype=token_type_ids.dtype))
+ cls_mat = tf.logical_or(tf.expand_dims(cls_ids, -1), tf.expand_dims(cls_ids, -2))
+ return tf.logical_or(cls_mat, token_type_mat)
+
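+ # Illustrative example (not from the original file): token_type_ids
+ # [[2, 0, 0, 1, 1]] (2 = cls_token_type_id) gives a 5x5 boolean matrix that is
+ # True within each segment and along the whole first row/column, since [CLS]
+ # counts as belonging to both segments.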
+ def get_position_embeds(self, seq_len, training=False):
+ """
+ Create and cache inputs related to relative position encoding. Those are very different depending on whether we
+ are using the factorized or the relative shift attention:
+
+ For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2,
+ final formula.
+
+ For the relative shift attention, it returns all possible vectors R used in the paper, appendix A.2.1, final
+ formula.
+
+ Paper link: https://arxiv.org/abs/2006.03236
+ """
+ if self.attention_type == "factorized":
+ # Notations from the paper, appendix A.2.2, final formula.
+ # We need to create and return the matrices phi, psi, pi and omega.
+ pos_seq = tf.range(0, seq_len, 1.0)
+ freq_seq = tf.range(0, self.d_model // 2, 1.0)
+ inv_freq = 1 / (10000 ** (freq_seq / (self.d_model // 2)))
+ sinusoid = tf.einsum("i,d->id", pos_seq, inv_freq)
+
+ sin_embed = tf.sin(sinusoid)
+ sin_embed_d = self.sin_dropout(sin_embed, training=training)
+ cos_embed = tf.cos(sinusoid)
+ cos_embed_d = self.cos_dropout(cos_embed, training=training)
+ # This is different from the formula in the paper...
+ phi = tf.concat([sin_embed_d, sin_embed_d], axis=-1)
+ psi = tf.concat([cos_embed, sin_embed], axis=-1)
+ pi = tf.concat([cos_embed_d, cos_embed_d], axis=-1)
+ omega = tf.concat([-sin_embed, cos_embed], axis=-1)
+ return (phi, pi, psi, omega)
+ else:
+ # Notations from the paper, appendix A.2.1, final formula.
+ # We need to create and return all the possible vectors R for all blocks and shifts.
+ freq_seq = tf.range(0, self.d_model // 2, 1.0)
+ inv_freq = 1 / (10000 ** (freq_seq / (self.d_model // 2)))
+ # Maximum relative positions for the first input
+ rel_pos_id = tf.range(-seq_len * 2, seq_len * 2, 1.0)
+ zero_offset = seq_len * tf.constant(2)
+ sinusoid = tf.einsum("i,d->id", rel_pos_id, inv_freq)
+ sin_embed = self.sin_dropout(tf.sin(sinusoid), training=training)
+ cos_embed = self.cos_dropout(tf.cos(sinusoid), training=training)
+ pos_embed = tf.concat([sin_embed, cos_embed], axis=-1)
+
+ pos = tf.range(0, seq_len)
+ pooled_pos = pos
+ position_embeds_list = []
+ for block_index in range(0, self.num_blocks):
+ # For each block with block_index > 0, we need two types of position embeddings:
+ # - Attention(pooled-q, unpooled-kv)
+ # - Attention(pooled-q, pooled-kv)
+ # For block_index = 0 we only need the second one and leave the first one as None.
+
+ # First type
+ position_embeds_pooling = tf.fill([1], value=-1.0)
+
+ if block_index != 0:
+ pooled_pos = self.stride_pool_pos(pos, block_index)
+
+ # construct rel_pos_id
+ stride = 2 ** (block_index - 1)
+ rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2)
+ # rel_pos = tf.expand_dims(rel_pos,1) + zero_offset
+ # rel_pos = tf.broadcast_to(rel_pos, (rel_pos.shape[0], self.d_model))
+ rel_pos = tf.cast(rel_pos, dtype=zero_offset.dtype)
+ rel_pos = rel_pos + zero_offset
+ position_embeds_pooling = tf.gather(pos_embed, rel_pos, axis=0)
+
+ # Second type
+ pos = pooled_pos
+ stride = 2**block_index
+ rel_pos = self.relative_pos(pos, stride)
+
+ # rel_pos = tf.expand_dims(rel_pos,1) + zero_offset
+ # rel_pos = tf.broadcast_to(rel_pos, (rel_pos.shape[0], self.d_model))
+ rel_pos = tf.cast(rel_pos, dtype=zero_offset.dtype)
+ rel_pos = rel_pos + zero_offset
+ tf.debugging.assert_less(rel_pos, tf.shape(pos_embed)[0])
+ position_embeds_no_pooling = tf.gather(pos_embed, rel_pos, axis=0)
+
+ position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling])
+ return position_embeds_list
+
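+ # Illustrative shapes (hedged): with attention_type="relative_shift" and
+ # num_blocks=3, the returned list has 3 entries [no-pooling R, pooling R]; the
+ # pooling entry of block 0 is a dummy tf.fill([1], -1.0) since block 0 never
+ # pools. With attention_type="factorized", a 4-tuple (phi, pi, psi, omega) of
+ # (seq_len, d_model) tensors is returned instead.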
+ def stride_pool_pos(self, pos_id, block_index):
+ """
+ Pool `pos_id` while keeping the cls token separate (if `self.separate_cls=True`).
+ """
+ if self.separate_cls:
+ # Under separate <cls>, we treat the <cls> as the first token in
+ # the previous block of the 1st real block. Since the 1st real
+ # block always has position 1, the position of the previous block
+ # will be at `1 - 2 ** block_index`.
+ cls_pos = tf.constant([-(2**block_index) + 1], dtype=pos_id.dtype)
+ pooled_pos_id = pos_id[1:-1] if self.truncate_seq else pos_id[1:]
+ return tf.concat([cls_pos, pooled_pos_id[::2]], 0)
+ else:
+ return pos_id[::2]
+
+ def relative_pos(self, pos, stride, pooled_pos=None, shift=1):
+ """
+ Build the relative positional vector between `pos` and `pooled_pos`.
+ """
+ if pooled_pos is None:
+ pooled_pos = pos
+
+ ref_point = pooled_pos[0] - pos[0]
+ num_remove = shift * shape_list(pooled_pos)[0]
+ max_dist = ref_point + num_remove * stride
+ min_dist = pooled_pos[0] - pos[-1]
+
+ return tf.range(max_dist, min_dist - 1, -stride)
+
+ def stride_pool(self, tensor, axis):
+ """
+ Perform pooling by stride slicing the tensor along the given axis.
+ """
+ if tensor is None:
+ return None
+
+ # Do the stride pool recursively if axis is a list or a tuple of ints.
+ if isinstance(axis, (list, tuple)):
+ for ax in axis:
+ tensor = self.stride_pool(tensor, ax)
+ return tensor
+
+ # Do the stride pool recursively if tensor is a list or tuple of tensors.
+ if isinstance(tensor, (tuple, list)):
+ return type(tensor)(self.stride_pool(x, axis) for x in tensor)
+
+ # Deal with negative axis
+ axis %= len(shape_list(tensor))
+
+ axis_slice = slice(None, -1, 2) if self.separate_cls and self.truncate_seq else slice(None, None, 2)
+ enc_slice = [slice(None)] * axis + [axis_slice]
+ if self.separate_cls:
+ cls_slice = [slice(None)] * axis + [slice(None, 1)]
+ tensor = tf.concat([tensor[cls_slice], tensor], axis)
+ return tensor[enc_slice]
+
+ def pool_tensor(self, tensor, mode="mean", stride=2):
+ """Apply 1D pooling to a tensor of size [B x T (x H)]."""
+ if tensor is None:
+ return None
+
+ # Do the pool recursively if tensor is a list or tuple of tensors.
+ if isinstance(tensor, (tuple, list)):
+ return type(tensor)(self.pool_tensor(x, mode=mode, stride=stride) for x in tensor)
+
+ if self.separate_cls:
+ suffix = tensor[:, :-1] if self.truncate_seq else tensor
+ tensor = tf.concat([tensor[:, :1], suffix], axis=1)
+
+ ndim = len(shape_list(tensor))
+ if ndim == 2:
+ tensor = tensor[:, :, None]
+
+ if mode == "mean":
+ tensor = tf.nn.avg_pool1d(tensor, stride, strides=stride, data_format="NWC", padding="SAME")
+ elif mode == "max":
+ tensor = tf.nn.max_pool1d(tensor, stride, strides=stride, data_format="NWC", padding="SAME")
+ elif mode == "min":
+ tensor = -tf.nn.max_pool1d(-tensor, stride, strides=stride, data_format="NWC", padding="SAME")
+ else:
+ raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.")
+
+ return tf.squeeze(tensor, 2) if ndim == 2 else tensor
+
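+ # Illustrative example (hedged): with separate_cls=True and truncate_seq=False,
+ # a length-6 sequence becomes [cls, cls, t1, ..., t5] (length 7) before "SAME"
+ # pooling with stride 2, yielding ceil(7/2) = 4 positions; [CLS] survives intact
+ # since the mean of the duplicated pair is [CLS] itself.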
+ def pre_attention_pooling(self, output, attention_inputs):
+ """Pool `output` and the proper parts of `attention_inputs` before the attention layer."""
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
+ if self.pool_q_only:
+ if self.attention_type == "factorized":
+ position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:]
+ token_type_mat = self.stride_pool(token_type_mat, 1)
+ cls_mask = self.stride_pool(cls_mask, 0)
+ output = self.pool_tensor(output, mode=self.pooling_type)
+ else:
+ self.pooling_mult *= 2
+ if self.attention_type == "factorized":
+ position_embeds = self.stride_pool(position_embeds, 0)
+ token_type_mat = self.stride_pool(token_type_mat, [1, 2])
+ cls_mask = self.stride_pool(cls_mask, [1, 2])
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
+ output = self.pool_tensor(output, mode=self.pooling_type)
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
+ return output, attention_inputs
+
+ def post_attention_pooling(self, attention_inputs):
+ """Pool the proper parts of `attention_inputs` after the attention layer."""
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
+ if self.pool_q_only:
+ self.pooling_mult *= 2
+ if self.attention_type == "factorized":
+ position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0)
+ token_type_mat = self.stride_pool(token_type_mat, 2)
+ cls_mask = self.stride_pool(cls_mask, 1)
+ attention_mask = self.pool_tensor(attention_mask, mode="min")
+ attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask)
+ return attention_inputs
+
+
+def _relative_shift_gather(positional_attn, context_len, shift):
+ batch_size, n_head, seq_len, max_rel_len = shape_list(positional_attn)
+ # max_rel_len = 2 * context_len + shift - 1 is the number of possible relative positions i-j
+
+ # What's next is the same as doing the following gather in PyTorch, which might be clearer code but less efficient.
+ # idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1)
+ # # matrix of context_len + i-j
+ # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len]))
+
+ positional_attn = tf.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len])
+ positional_attn = positional_attn[:, :, shift:, :]
+ positional_attn = tf.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift])
+ positional_attn = positional_attn[..., :context_len]
+ return positional_attn
+
+
+class TFFunnelRelMultiheadAttention(keras.layers.Layer):
+ def __init__(self, config, block_index, **kwargs):
+ super().__init__(**kwargs)
+ self.attention_type = config.attention_type
+ self.n_head = n_head = config.n_head
+ self.d_head = d_head = config.d_head
+ self.d_model = d_model = config.d_model
+ self.initializer_range = config.initializer_range
+ self.block_index = block_index
+
+ self.hidden_dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.attention_dropout = keras.layers.Dropout(config.attention_dropout)
+
+ initializer = get_initializer(config.initializer_range)
+
+ self.q_head = keras.layers.Dense(
+ n_head * d_head, use_bias=False, kernel_initializer=initializer, name="q_head"
+ )
+ self.k_head = keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name="k_head")
+ self.v_head = keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name="v_head")
+
+ self.post_proj = keras.layers.Dense(d_model, kernel_initializer=initializer, name="post_proj")
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.scale = 1.0 / (d_head**0.5)
+
+ def build(self, input_shape=None):
+ n_head, d_head, d_model = self.n_head, self.d_head, self.d_model
+ initializer = get_initializer(self.initializer_range)
+
+ self.r_w_bias = self.add_weight(
+ shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_w_bias"
+ )
+ self.r_r_bias = self.add_weight(
+ shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_r_bias"
+ )
+ self.r_kernel = self.add_weight(
+ shape=(d_model, n_head, d_head), initializer=initializer, trainable=True, name="r_kernel"
+ )
+ self.r_s_bias = self.add_weight(
+ shape=(n_head, d_head), initializer=initializer, trainable=True, name="r_s_bias"
+ )
+ self.seg_embed = self.add_weight(
+ shape=(2, n_head, d_head), initializer=initializer, trainable=True, name="seg_embed"
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_head", None) is not None:
+ with tf.name_scope(self.q_head.name):
+ self.q_head.build([None, None, d_model])
+ if getattr(self, "k_head", None) is not None:
+ with tf.name_scope(self.k_head.name):
+ self.k_head.build([None, None, d_model])
+ if getattr(self, "v_head", None) is not None:
+ with tf.name_scope(self.v_head.name):
+ self.v_head.build([None, None, d_model])
+ if getattr(self, "post_proj", None) is not None:
+ with tf.name_scope(self.post_proj.name):
+ self.post_proj.build([None, None, n_head * d_head])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, d_model])
+
+ def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None):
+ """Relative attention score for the positional encodings"""
+        # q_head has shape batch_size x seq_len x n_head x d_head
+ if self.attention_type == "factorized":
+            # Notations from the paper, appendix A.2.2, final formula (https://arxiv.org/abs/2006.03236)
+ # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model
+ phi, pi, psi, omega = position_embeds
+ # Shape n_head x d_head
+ u = self.r_r_bias * self.scale
+ # Shape d_model x n_head x d_head
+ w_r = self.r_kernel
+
+            # Shape batch_size x seq_len x n_head x d_model
+ q_r_attention = tf.einsum("binh,dnh->bind", q_head + u, w_r)
+ q_r_attention_1 = q_r_attention * phi[:, None]
+ q_r_attention_2 = q_r_attention * pi[:, None]
+
+ # Shape batch_size x n_head x seq_len x context_len
+ positional_attn = tf.einsum("bind,jd->bnij", q_r_attention_1, psi) + tf.einsum(
+ "bind,jd->bnij", q_r_attention_2, omega
+ )
+ else:
+            # Notations from the paper, appendix A.2.1, final formula (https://arxiv.org/abs/2006.03236)
+ # Grab the proper positional encoding, shape max_rel_len x d_model
+ if shape_list(q_head)[1] != context_len:
+ shift = 2
+ r = position_embeds[self.block_index][1]
+ else:
+ shift = 1
+ r = position_embeds[self.block_index][0]
+ # Shape n_head x d_head
+ v = self.r_r_bias * self.scale
+ # Shape d_model x n_head x d_head
+ w_r = self.r_kernel
+
+ # Shape max_rel_len x n_head x d_model
+ r_head = tf.einsum("td,dnh->tnh", r, w_r)
+ # Shape batch_size x n_head x seq_len x max_rel_len
+ positional_attn = tf.einsum("binh,tnh->bnit", q_head + v, r_head)
+ # Shape batch_size x n_head x seq_len x context_len
+ positional_attn = _relative_shift_gather(positional_attn, context_len, shift)
+
+ if cls_mask is not None:
+ positional_attn *= cls_mask
+ return positional_attn
+
+ def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None):
+ """Relative attention score for the token_type_ids"""
+ if token_type_mat is None:
+ return 0
+ batch_size, seq_len, context_len = shape_list(token_type_mat)
+ # q_head has shape batch_size x seq_len x n_head x d_head
+ # Shape n_head x d_head
+ r_s_bias = self.r_s_bias * self.scale
+
+ # Shape batch_size x n_head x seq_len x 2
+ token_type_bias = tf.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed)
+ # Shape batch_size x n_head x seq_len x context_len
+ token_type_mat = tf.tile(token_type_mat[:, None], [1, shape_list(q_head)[2], 1, 1])
+ # token_type_mat = tf.broadcast_to(token_type_mat[:, None], new_shape)
+        # Shapes batch_size x n_head x seq_len x 1
+ diff_token_type, same_token_type = tf.split(token_type_bias, 2, axis=-1)
+ # Shape batch_size x n_head x seq_len x context_len
+ token_type_attn = tf.where(
+ token_type_mat,
+ tf.tile(same_token_type, [1, 1, 1, context_len]),
+ tf.tile(diff_token_type, [1, 1, 1, context_len]),
+ )
+
+ if cls_mask is not None:
+ token_type_attn *= cls_mask
+ return token_type_attn
+
+ def call(self, query, key, value, attention_inputs, output_attentions=False, training=False):
+ # query has shape batch_size x seq_len x d_model
+ # key and value have shapes batch_size x context_len x d_model
+ position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
+
+ batch_size, seq_len, _ = shape_list(query)
+ context_len = shape_list(key)[1]
+ n_head, d_head = self.n_head, self.d_head
+
+ # Shape batch_size x seq_len x n_head x d_head
+ q_head = tf.reshape(self.q_head(query), [batch_size, seq_len, n_head, d_head])
+ # Shapes batch_size x context_len x n_head x d_head
+ k_head = tf.reshape(self.k_head(key), [batch_size, context_len, n_head, d_head])
+ v_head = tf.reshape(self.v_head(value), [batch_size, context_len, n_head, d_head])
+
+ q_head = q_head * self.scale
+ # Shape n_head x d_head
+ r_w_bias = self.r_w_bias * self.scale
+ # Shapes batch_size x n_head x seq_len x context_len
+ content_score = tf.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head)
+ positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask)
+ token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask)
+
+ # merge attention scores
+ attn_score = content_score + positional_attn + token_type_attn
+
+ # perform masking
+ if attention_mask is not None:
+ attention_mask = tf.cast(attention_mask, dtype=attn_score.dtype)
+ attn_score = attn_score - (INF * (1 - attention_mask[:, None, None]))
+
+ # attention probability
+ attn_prob = stable_softmax(attn_score, axis=-1)
+ attn_prob = self.attention_dropout(attn_prob, training=training)
+
+ # attention output, shape batch_size x seq_len x n_head x d_head
+ attn_vec = tf.einsum("bnij,bjnd->bind", attn_prob, v_head)
+
+        # Shape batch_size x seq_len x d_model
+ attn_out = self.post_proj(tf.reshape(attn_vec, [batch_size, seq_len, n_head * d_head]))
+ attn_out = self.hidden_dropout(attn_out, training=training)
+
+ output = self.layer_norm(query + attn_out)
+ return (output, attn_prob) if output_attentions else (output,)
+
+
+class TFFunnelPositionwiseFFN(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ initializer = get_initializer(config.initializer_range)
+ self.linear_1 = keras.layers.Dense(config.d_inner, kernel_initializer=initializer, name="linear_1")
+ self.activation_function = get_tf_activation(config.hidden_act)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+ self.linear_2 = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="linear_2")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.config = config
+
+ def call(self, hidden, training=False):
+ h = self.linear_1(hidden)
+ h = self.activation_function(h)
+ h = self.activation_dropout(h, training=training)
+ h = self.linear_2(h)
+ h = self.dropout(h, training=training)
+ return self.layer_norm(hidden + h)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "linear_1", None) is not None:
+ with tf.name_scope(self.linear_1.name):
+ self.linear_1.build([None, None, self.config.d_model])
+ if getattr(self, "linear_2", None) is not None:
+ with tf.name_scope(self.linear_2.name):
+ self.linear_2.build([None, None, self.config.d_inner])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+
+
+class TFFunnelLayer(keras.layers.Layer):
+ def __init__(self, config, block_index, **kwargs):
+ super().__init__(**kwargs)
+ self.attention = TFFunnelRelMultiheadAttention(config, block_index, name="attention")
+ self.ffn = TFFunnelPositionwiseFFN(config, name="ffn")
+
+ def call(self, query, key, value, attention_inputs, output_attentions=False, training=False):
+ attn = self.attention(
+ query, key, value, attention_inputs, output_attentions=output_attentions, training=training
+ )
+ output = self.ffn(attn[0], training=training)
+ return (output, attn[1]) if output_attentions else (output,)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "ffn", None) is not None:
+ with tf.name_scope(self.ffn.name):
+ self.ffn.build(None)
+
+
+class TFFunnelEncoder(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.separate_cls = config.separate_cls
+ self.pool_q_only = config.pool_q_only
+ self.block_repeats = config.block_repeats
+ self.attention_structure = TFFunnelAttentionStructure(config)
+ self.blocks = [
+ [TFFunnelLayer(config, block_index, name=f"blocks_._{block_index}_._{i}") for i in range(block_size)]
+ for block_index, block_size in enumerate(config.block_sizes)
+ ]
+
+ def call(
+ self,
+ inputs_embeds,
+ attention_mask=None,
+ token_type_ids=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ training=False,
+ ):
+ # The pooling is not implemented on long tensors, so we convert this mask.
+ # attention_mask = tf.cast(attention_mask, inputs_embeds.dtype)
+ attention_inputs = self.attention_structure.init_attention_inputs(
+ inputs_embeds,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ training=training,
+ )
+ hidden = inputs_embeds
+
+ all_hidden_states = (inputs_embeds,) if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for block_index, block in enumerate(self.blocks):
+ pooling_flag = shape_list(hidden)[1] > (2 if self.separate_cls else 1)
+ pooling_flag = pooling_flag and block_index > 0
+ pooled_hidden = tf.zeros(shape_list(hidden))
+
+ if pooling_flag:
+ pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling(
+ hidden, attention_inputs
+ )
+
+ for layer_index, layer in enumerate(block):
+ for repeat_index in range(self.block_repeats[block_index]):
+ do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag
+ if do_pooling:
+ query = pooled_hidden
+ key = value = hidden if self.pool_q_only else pooled_hidden
+ else:
+ query = key = value = hidden
+ layer_output = layer(
+ query, key, value, attention_inputs, output_attentions=output_attentions, training=training
+ )
+ hidden = layer_output[0]
+ if do_pooling:
+ attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs)
+
+ if output_attentions:
+ all_attentions = all_attentions + layer_output[1:]
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
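+
+    # Editor's note: pooling happens only at the first layer and first repeat of
+    # each block after block 0; with pool_q_only=True that step attends with the
+    # pooled query against the full-length keys/values, and every later layer in
+    # the block runs entirely at the pooled length.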
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ for block in self.blocks:
+ for layer in block:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+def upsample(x, stride, target_len, separate_cls=True, truncate_seq=False):
+ """
+    Upsample tensor `x` to match `target_len` by repeating the tokens `stride` times along the sequence length dimension.
+ """
+ if stride == 1:
+ return x
+ if separate_cls:
+ cls = x[:, :1]
+ x = x[:, 1:]
+ output = tf.repeat(x, repeats=stride, axis=1)
+ if separate_cls:
+ if truncate_seq:
+ output = tf.pad(output, [[0, 0], [0, stride - 1], [0, 0]])
+ output = output[:, : target_len - 1]
+ output = tf.concat([cls, output], axis=1)
+ else:
+ output = output[:, :target_len]
+ return output
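+
+
+# Editor's note (illustrative sketch, not part of the original module): with
+# stride=2 and separate_cls=True (the signature defaults), the first ([CLS])
+# position is carried over unchanged and every other token is repeated twice:
+#
+#   x = tf.constant([[[1.0], [2.0], [3.0]]])     # (batch=1, seq=3, d=1)
+#   upsample(x, stride=2, target_len=5)
+#   # -> [[[1.], [2.], [2.], [3.], [3.]]]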
+
+
+class TFFunnelDecoder(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.separate_cls = config.separate_cls
+ self.truncate_seq = config.truncate_seq
+ self.stride = 2 ** (len(config.block_sizes) - 1)
+ self.attention_structure = TFFunnelAttentionStructure(config)
+ self.layers = [TFFunnelLayer(config, 0, name=f"layers_._{i}") for i in range(config.num_decoder_layers)]
+
+ def call(
+ self,
+ final_hidden,
+ first_block_hidden,
+ attention_mask=None,
+ token_type_ids=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ training=False,
+ ):
+ upsampled_hidden = upsample(
+ final_hidden,
+ stride=self.stride,
+ target_len=shape_list(first_block_hidden)[1],
+ separate_cls=self.separate_cls,
+ truncate_seq=self.truncate_seq,
+ )
+
+ hidden = upsampled_hidden + first_block_hidden
+ all_hidden_states = (hidden,) if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ attention_inputs = self.attention_structure.init_attention_inputs(
+ hidden,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ training=training,
+ )
+
+ for layer in self.layers:
+ layer_output = layer(
+ hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions, training=training
+ )
+ hidden = layer_output[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + layer_output[1:]
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFFunnelBaseLayer(keras.layers.Layer):
+ """Base model without decoder"""
+
+ config_class = FunnelConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+
+ self.embeddings = TFFunnelEmbeddings(config, name="embeddings")
+ self.encoder = TFFunnelEncoder(config, name="encoder")
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+        raise NotImplementedError  # Not implemented yet in the library for TF 2.0 models
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ token_type_ids=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.fill(input_shape, 1)
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(input_shape, 0)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embeddings(input_ids, training=training)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return encoder_outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+
+
+@keras_serializable
+class TFFunnelMainLayer(keras.layers.Layer):
+ """Base model with decoder"""
+
+ config_class = FunnelConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.block_sizes = config.block_sizes
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+
+ self.embeddings = TFFunnelEmbeddings(config, name="embeddings")
+ self.encoder = TFFunnelEncoder(config, name="encoder")
+ self.decoder = TFFunnelDecoder(config, name="decoder")
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+        raise NotImplementedError  # Not implemented yet in the library for TF 2.0 models
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ token_type_ids=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.fill(input_shape, 1)
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(input_shape, 0)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embeddings(input_ids, training=training)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=True,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ decoder_outputs = self.decoder(
+ final_hidden=encoder_outputs[0],
+ first_block_hidden=encoder_outputs[1][self.block_sizes[0]],
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ idx = 0
+ outputs = (decoder_outputs[0],)
+ if output_hidden_states:
+ idx += 1
+ outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],)
+ if output_attentions:
+ idx += 1
+ outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],)
+ return outputs
+
+ return TFBaseModelOutput(
+ last_hidden_state=decoder_outputs[0],
+ hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states)
+ if output_hidden_states
+ else None,
+ attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
+
+
+class TFFunnelDiscriminatorPredictions(keras.layers.Layer):
+ """Prediction module for the discriminator, made up of two dense layers."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ initializer = get_initializer(config.initializer_range)
+ self.dense = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="dense")
+ self.activation_function = get_tf_activation(config.hidden_act)
+ self.dense_prediction = keras.layers.Dense(1, kernel_initializer=initializer, name="dense_prediction")
+ self.config = config
+
+ def call(self, discriminator_hidden_states):
+ hidden_states = self.dense(discriminator_hidden_states)
+ hidden_states = self.activation_function(hidden_states)
+ logits = tf.squeeze(self.dense_prediction(hidden_states))
+ return logits
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.d_model])
+ if getattr(self, "dense_prediction", None) is not None:
+ with tf.name_scope(self.dense_prediction.name):
+ self.dense_prediction.build([None, None, self.config.d_model])
+
+
+class TFFunnelMaskedLMHead(keras.layers.Layer):
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.input_embeddings = input_embeddings
+
+ def build(self, input_shape):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ super().build(input_shape)
+
+ def get_output_embeddings(self):
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states, training=False):
+ seq_length = shape_list(tensor=hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
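+# Editor's note: TFFunnelMaskedLMHead ties its output projection to the input
+# embedding matrix (the `transpose_b=True` matmul against
+# `input_embeddings.weight` above), so the bias is the only weight owned by the
+# head itself, and resizing the input embeddings also resizes the LM head.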
+
+class TFFunnelClassificationHead(keras.layers.Layer):
+ def __init__(self, config, n_labels, **kwargs):
+ super().__init__(**kwargs)
+ initializer = get_initializer(config.initializer_range)
+ self.linear_hidden = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name="linear_hidden")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.linear_out = keras.layers.Dense(n_labels, kernel_initializer=initializer, name="linear_out")
+ self.config = config
+
+ def call(self, hidden, training=False):
+ hidden = self.linear_hidden(hidden)
+ hidden = keras.activations.tanh(hidden)
+ hidden = self.dropout(hidden, training=training)
+ return self.linear_out(hidden)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "linear_hidden", None) is not None:
+ with tf.name_scope(self.linear_hidden.name):
+ self.linear_hidden.build([None, None, self.config.d_model])
+ if getattr(self, "linear_out", None) is not None:
+ with tf.name_scope(self.linear_out.name):
+ self.linear_out.build([None, None, self.config.d_model])
+
+
+class TFFunnelPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = FunnelConfig
+ base_model_prefix = "funnel"
+
+ @property
+ def dummy_inputs(self):
+ # Funnel misbehaves with very small inputs, so we override and make them a bit bigger
+ return {"input_ids": tf.ones((1, 3), dtype=tf.int32)}
+
+
+@dataclass
+class TFFunnelForPreTrainingOutput(ModelOutput):
+ """
+    Output type of [`TFFunnelForPreTraining`].
+
+ Args:
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Prediction scores of the head (scores for each token before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+FUNNEL_START_DOCSTRING = r"""
+
+ The Funnel Transformer model was proposed in [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient
+ Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+    <Tip>
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+    </Tip>
+
+ Parameters:
+ config ([`XxxConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+FUNNEL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ """
+ The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called
+ decoder) or any task-specific head on top.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class TFFunnelBaseModel(TFFunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+ self.funnel = TFFunnelBaseLayer(config, name="funnel")
+
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small-base",
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutput]:
+ return self.funnel(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ def serving_output(self, output):
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
+ # different dimensions
+ return TFBaseModelOutput(
+ last_hidden_state=output.last_hidden_state,
+ hidden_states=output.hidden_states,
+ attentions=output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "funnel", None) is not None:
+ with tf.name_scope(self.funnel.name):
+ self.funnel.build(None)
+
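+# Editor's note (illustrative usage sketch; assumes TF weights are available for
+# the checkpoint): TFFunnelBaseModel stops after the encoder, so its
+# last_hidden_state keeps the pooled, shorter sequence length, while
+# TFFunnelModel below runs the decoder and upsamples back to the input length:
+#
+#   model = TFFunnelBaseModel.from_pretrained("funnel-transformer/small-base")
+#   out = model(tf.constant([[1, 2, 3, 4, 5, 6, 7, 8]]))
+#   # out.last_hidden_state.shape[1] < 8 because of the funnel pooling
+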
+
+@add_start_docstrings(
+ "The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.",
+ FUNNEL_START_DOCSTRING,
+)
+class TFFunnelModel(TFFunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small",
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[Tuple[tf.Tensor], TFBaseModelOutput]:
+ return self.funnel(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ def serving_output(self, output):
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
+ # different dimensions
+ return TFBaseModelOutput(
+ last_hidden_state=output.last_hidden_state,
+ hidden_states=output.hidden_states,
+ attentions=output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "funnel", None) is not None:
+ with tf.name_scope(self.funnel.name):
+ self.funnel.build(None)
+
+
+@add_start_docstrings(
+ """
+ Funnel model with a binary classification head on top as used during pretraining for identifying generated tokens.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class TFFunnelForPreTraining(TFFunnelPreTrainedModel):
+ def __init__(self, config: FunnelConfig, **kwargs) -> None:
+ super().__init__(config, **kwargs)
+
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
+ self.discriminator_predictions = TFFunnelDiscriminatorPredictions(config, name="discriminator_predictions")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFFunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ **kwargs,
+ ) -> Union[Tuple[tf.Tensor], TFFunnelForPreTrainingOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TFFunnelForPreTraining
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small")
+ >>> model = TFFunnelForPreTraining.from_pretrained("funnel-transformer/small")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+ >>> logits = model(inputs).logits
+ ```"""
+ discriminator_hidden_states = self.funnel(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ discriminator_sequence_output = discriminator_hidden_states[0]
+ logits = self.discriminator_predictions(discriminator_sequence_output)
+
+ if not return_dict:
+ return (logits,) + discriminator_hidden_states[1:]
+
+ return TFFunnelForPreTrainingOutput(
+ logits=logits,
+ hidden_states=discriminator_hidden_states.hidden_states,
+ attentions=discriminator_hidden_states.attentions,
+ )
+
+ def serving_output(self, output):
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
+ # different dimensions
+ return TFFunnelForPreTrainingOutput(
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "funnel", None) is not None:
+ with tf.name_scope(self.funnel.name):
+ self.funnel.build(None)
+ if getattr(self, "discriminator_predictions", None) is not None:
+ with tf.name_scope(self.discriminator_predictions.name):
+ self.discriminator_predictions.build(None)
+
+
+@add_start_docstrings("""Funnel Model with a `language modeling` head on top.""", FUNNEL_START_DOCSTRING)
+class TFFunnelForMaskedLM(TFFunnelPreTrainedModel, TFMaskedLanguageModelingLoss):
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
+ self.lm_head = TFFunnelMaskedLMHead(config, self.funnel.embeddings, name="lm_head")
+
+ def get_lm_head(self) -> TFFunnelMaskedLMHead:
+ return self.lm_head
+
+ def get_prefix_bias_name(self) -> str:
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.lm_head.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small",
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[Tuple[tf.Tensor], TFMaskedLMOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+ outputs = self.funnel(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output, training=training)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
+ # different dimensions
+ return TFMaskedLMOutput(logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "funnel", None) is not None:
+ with tf.name_scope(self.funnel.name):
+ self.funnel.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build(None)
+
+
+@add_start_docstrings(
+ """
+ Funnel Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class TFFunnelForSequenceClassification(TFFunnelPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.funnel = TFFunnelBaseLayer(config, name="funnel")
+ self.classifier = TFFunnelClassificationHead(config, config.num_labels, name="classifier")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small-base",
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[Tuple[tf.Tensor], TFSequenceClassifierOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (mean-square loss); if
+            `config.num_labels > 1`, a classification loss is computed (cross-entropy).
+ """
+ outputs = self.funnel(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ last_hidden_state = outputs[0]
+ pooled_output = last_hidden_state[:, 0]
+ logits = self.classifier(pooled_output, training=training)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
+ # different dimensions
+ return TFSequenceClassifierOutput(
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "funnel", None) is not None:
+ with tf.name_scope(self.funnel.name):
+ self.funnel.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build(None)
+
+
+@add_start_docstrings(
+ """
+ Funnel Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class TFFunnelForMultipleChoice(TFFunnelPreTrainedModel, TFMultipleChoiceLoss):
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+
+ self.funnel = TFFunnelBaseLayer(config, name="funnel")
+ self.classifier = TFFunnelClassificationHead(config, 1, name="classifier")
+
+ @property
+ def dummy_inputs(self):
+ return {"input_ids": tf.ones((3, 3, 4), dtype=tf.int32)}
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small-base",
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[Tuple[tf.Tensor], TFMultipleChoiceModelOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+            num_choices - 1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+            `input_ids` above.)
+ """
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.funnel(
+ flat_input_ids,
+ attention_mask=flat_attention_mask,
+ token_type_ids=flat_token_type_ids,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ last_hidden_state = outputs[0]
+ pooled_output = last_hidden_state[:, 0]
+ logits = self.classifier(pooled_output, training=training)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
+ # different dimensions
+ return TFMultipleChoiceModelOutput(
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "funnel", None) is not None:
+ with tf.name_scope(self.funnel.name):
+ self.funnel.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build(None)
+
+
+@add_start_docstrings(
+ """
+ Funnel Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class TFFunnelForTokenClassification(TFFunnelPreTrainedModel, TFTokenClassificationLoss):
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout)
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small",
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[Tuple[tf.Tensor], TFTokenClassifierOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ outputs = self.funnel(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
+ # different dimensions
+ return TFTokenClassifierOutput(
+ logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "funnel", None) is not None:
+ with tf.name_scope(self.funnel.name):
+ self.funnel.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ Funnel Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ FUNNEL_START_DOCSTRING,
+)
+class TFFunnelForQuestionAnswering(TFFunnelPreTrainedModel, TFQuestionAnsweringLoss):
+ def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.funnel = TFFunnelMainLayer(config, name="funnel")
+ self.qa_outputs = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="funnel-transformer/small",
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[Tuple[tf.Tensor], TFQuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+
+ outputs = self.funnel(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions, "end_position": end_positions}
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
+ # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of
+ # different dimensions
+ return TFQuestionAnsweringModelOutput(
+ start_logits=output.start_logits,
+ end_logits=output.end_logits,
+ hidden_states=output.hidden_states,
+ attentions=output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "funnel", None) is not None:
+ with tf.name_scope(self.funnel.name):
+ self.funnel.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel_fast.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ff2a3bfefc57eb7dd67eb0e0d810cb492e87521
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel_fast.py
@@ -0,0 +1,200 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization class for Funnel Transformer."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_funnel import FunnelTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+_model_names = [
+ "small",
+ "small-base",
+ "medium",
+ "medium-base",
+ "intermediate",
+ "intermediate-base",
+ "large",
+ "large-base",
+ "xlarge",
+ "xlarge-base",
+]
+
+
+class FunnelTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" Funnel Transformer tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `""`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"<sep>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"<cls>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
+ whitespaces by the classic one.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sentence token.
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sentence token.
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = FunnelTokenizer
+ cls_token_type_id: int = 2
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="",
+ sep_token="",
+ pad_token="",
+ cls_token="",
+ mask_token="",
+ bos_token="",
+ eos_token="",
+ clean_text=True,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ wordpieces_prefix="##",
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ clean_text=clean_text,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ wordpieces_prefix=wordpieces_prefix,
+ **kwargs,
+ )
+
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens with BERT->Funnel
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. A Funnel sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
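+ # For example (token IDs illustrative): token_ids_0=[10, 11] yields
+ # [cls_token_id, 10, 11, sep_token_id]; passing token_ids_1=[12] additionally
+ # appends [12, sep_token_id].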
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel
+ Transformer sequence pair mask has the following format:
+
+ ```
+ 2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
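+ # For example (token IDs illustrative): token_ids_0=[10, 11] with token_ids_1=[12]
+ # maps to [2, 0, 0, 0, 1, 1]: one 2 for [CLS] (cls_token_type_id above), 0s covering
+ # sequence A plus its [SEP], and 1s covering sequence B plus its [SEP].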
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
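+
+
+# Usage sketch (checkpoint name illustrative): FunnelTokenizerFast.from_pretrained(
+# "funnel-transformer/small")("hello world") returns token_type_ids that begin with 2,
+# the dedicated [CLS] type id set via cls_token_type_id above.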
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..248cd7280e95e18b78129271a1aba2e6567234a1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..682345b1be3993478b4a9085b1779fe6c560aab9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9441cbda5e7609ac4cd1771990c50e78c2bade88
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25ea6a9e21b2ae2cb1d9a044df1e6a783b453649
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7dba0b5dc0cf85f8ed83f8f02b5def4e0b21c95
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__init__.py
@@ -0,0 +1,119 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_openai": ["OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenAIGPTConfig"],
+ "tokenization_openai": ["OpenAIGPTTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_openai_fast"] = ["OpenAIGPTTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_openai"] = [
+ "OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "OpenAIGPTDoubleHeadsModel",
+ "OpenAIGPTForSequenceClassification",
+ "OpenAIGPTLMHeadModel",
+ "OpenAIGPTModel",
+ "OpenAIGPTPreTrainedModel",
+ "load_tf_weights_in_openai_gpt",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_openai"] = [
+ "TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFOpenAIGPTDoubleHeadsModel",
+ "TFOpenAIGPTForSequenceClassification",
+ "TFOpenAIGPTLMHeadModel",
+ "TFOpenAIGPTMainLayer",
+ "TFOpenAIGPTModel",
+ "TFOpenAIGPTPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig
+ from .tokenization_openai import OpenAIGPTTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_openai_fast import OpenAIGPTTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_openai import (
+ OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ OpenAIGPTDoubleHeadsModel,
+ OpenAIGPTForSequenceClassification,
+ OpenAIGPTLMHeadModel,
+ OpenAIGPTModel,
+ OpenAIGPTPreTrainedModel,
+ load_tf_weights_in_openai_gpt,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_openai import (
+ TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFOpenAIGPTDoubleHeadsModel,
+ TFOpenAIGPTForSequenceClassification,
+ TFOpenAIGPTLMHeadModel,
+ TFOpenAIGPTMainLayer,
+ TFOpenAIGPTModel,
+ TFOpenAIGPTPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
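+
+# Note: with this lazy pattern, `from transformers.models.openai import OpenAIGPTModel`
+# only imports modeling_openai on first attribute access, and names guarded by an
+# unavailable backend (e.g. torch) are simply left out of the lazy table instead of
+# failing at import time.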
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b57a778e7ff830481a330a65b22b8b55b7daed5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/configuration_openai.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/configuration_openai.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb560162bd9dc1812627f12dda3c73ca0f090c95
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/configuration_openai.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/convert_openai_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/convert_openai_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d027c5741a94b9ac0b71af4a4385c678a54d14af
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/convert_openai_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_openai.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_openai.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bb90a9d0b662e26009f7534251257b34c88bec6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_openai.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_tf_openai.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_tf_openai.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3fd13d975b3556163de56402f88656769434e81
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/modeling_tf_openai.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1ff849ba6e88474b488e256a19c3de9c6270250
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai_fast.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3098726c7bcea010b7dbe7ecd5d424f6b01c327b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/__pycache__/tokenization_openai_fast.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/configuration_openai.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/configuration_openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..422922c7912dec652fa3aa4a154fe6f24051d0a0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/configuration_openai.py
@@ -0,0 +1,156 @@
+# coding=utf-8
+# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" OpenAI GPT configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class OpenAIGPTConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`OpenAIGPTModel`] or a [`TFOpenAIGPTModel`]. It is
+ used to instantiate a GPT model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT
+ [openai-community/openai-gpt](https://huggingface.co/openai-community/openai-gpt) architecture from OpenAI.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 40478):
+ Vocabulary size of the GPT model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`OpenAIGPTModel`] or [`TFOpenAIGPTModel`].
+ n_positions (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ n_embd (`int`, *optional*, defaults to 768):
+ Dimensionality of the embeddings and hidden states.
+ n_layer (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ n_head (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ afn (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ embd_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the embeddings.
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention.
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+ The epsilon to use in the layer normalization layers.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ summary_type (`str`, *optional*, defaults to `"cls_index"`):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+ [`TFOpenAIGPTDoubleHeadsModel`].
+
+ Has to be one of the following options:
+
+ - `"last"`: Take the last token hidden state (like XLNet).
+ - `"first"`: Take the first token hidden state (like BERT).
+ - `"mean"`: Take the mean of all tokens hidden states.
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
+ - `"attn"`: Not implemented now, use multi-head attention.
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+ [`TFOpenAIGPTDoubleHeadsModel`].
+
+ Whether or not to add a projection after the vector extraction.
+ summary_activation (`str`, *optional*):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+ [`TFOpenAIGPTDoubleHeadsModel`].
+
+ Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
+ summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+ [`TFOpenAIGPTDoubleHeadsModel`].
+
+ Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
+ summary_first_dropout (`float`, *optional*, defaults to 0.1):
+ Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
+ [`TFOpenAIGPTDoubleHeadsModel`].
+
+ The dropout ratio to be used after the projection and activation.
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import OpenAIGPTConfig, OpenAIGPTModel
+
+ >>> # Initializing a GPT configuration
+ >>> configuration = OpenAIGPTConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = OpenAIGPTModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "openai-gpt"
+ attribute_map = {
+ "max_position_embeddings": "n_positions",
+ "hidden_size": "n_embd",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=40478,
+ n_positions=512,
+ n_embd=768,
+ n_layer=12,
+ n_head=12,
+ afn="gelu",
+ resid_pdrop=0.1,
+ embd_pdrop=0.1,
+ attn_pdrop=0.1,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ summary_type="cls_index",
+ summary_use_proj=True,
+ summary_activation=None,
+ summary_proj_to_labels=True,
+ summary_first_dropout=0.1,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.n_positions = n_positions
+ self.n_embd = n_embd
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.afn = afn
+ self.resid_pdrop = resid_pdrop
+ self.embd_pdrop = embd_pdrop
+ self.attn_pdrop = attn_pdrop
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_range = initializer_range
+ self.summary_type = summary_type
+ self.summary_use_proj = summary_use_proj
+ self.summary_activation = summary_activation
+ self.summary_first_dropout = summary_first_dropout
+ self.summary_proj_to_labels = summary_proj_to_labels
+ super().__init__(**kwargs)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b101aea0cc0de26defb0198b4bc5e762b7ccce8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,75 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert OpenAI GPT checkpoint."""
+
+
+import argparse
+
+import torch
+
+from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
+from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
+ # Construct model
+ if openai_config_file == "":
+ config = OpenAIGPTConfig()
+ else:
+ config = OpenAIGPTConfig.from_json_file(openai_config_file)
+ model = OpenAIGPTModel(config)
+
+ # Load weights from numpy
+ load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
+
+ # Save pytorch-model
+ pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
+ print(f"Save PyTorch model to {pytorch_weights_dump_path}")
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
+ print(f"Save configuration file to {pytorch_config_dump_path}")
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
+ f.write(config.to_json_string())
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--openai_checkpoint_folder_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the TensorFlow checkpoint path.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--openai_config_file",
+ default="",
+ type=str,
+ help=(
+ "An optional config json file corresponding to the pre-trained OpenAI model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ args = parser.parse_args()
+ convert_openai_checkpoint_to_pytorch(
+ args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
+ )
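+
+
+# Example invocation (paths are placeholders):
+#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
+#     --openai_checkpoint_folder_path /path/to/openai/checkpoint \
+#     --pytorch_dump_folder_path /path/to/pytorch_dump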
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_openai.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..637aa90cff9f1db4d094b2ae0ae11fa24fde5ca8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_openai.py
@@ -0,0 +1,859 @@
+# coding=utf-8
+# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch OpenAI GPT model."""
+
+
+import json
+import math
+import os
+from dataclasses import dataclass
+from typing import Any, Dict, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import gelu_new, silu
+from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
+from ...modeling_utils import PreTrainedModel, SequenceSummary
+from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_openai import OpenAIGPTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "openai-community/openai-gpt"
+_CONFIG_FOR_DOC = "OpenAIGPTConfig"
+
+
+from ..deprecated._archive_maps import OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
+ """Load tf pre-trained weights in a pytorch model (from NumPy arrays here)"""
+ import re
+
+ import numpy as np
+
+ if ".ckpt" in openai_checkpoint_folder_path:
+ openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
+
+ logger.info(f"Loading weights from {openai_checkpoint_folder_path}")
+
+ with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
+ names = json.load(names_handle)
+ with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
+ shapes = json.load(shapes_handle)
+ offsets = np.cumsum([np.prod(shape) for shape in shapes])
+ init_params = [np.load(openai_checkpoint_folder_path + f"/params_{n}.npy") for n in range(10)]
+ init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
+ init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
+
+ # This was used when we had a single embedding matrix for positions and tokens
+ # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
+ # del init_params[1]
+ init_params = [arr.squeeze() for arr in init_params]
+
+ # Check that the token and position embedding weight dimensions match those of the init parameters.
+ if model.tokens_embed.weight.shape != init_params[1].shape:
+ raise ValueError(
+ f"tokens_embed.weight.shape: {model.tokens_embed.weight.shape} does not match init_param[1].shape:"
+ f" {init_params[1].shape}"
+ )
+
+ if model.positions_embed.weight.shape != init_params[0].shape:
+ raise ValueError(
+ f"positions_embed.weight.shape: {model.positions_embed.weight.shape} does not match init_param[0].shape:"
+ f" {init_params[0].shape}"
+ )
+
+ model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
+ model.positions_embed.weight.data = torch.from_numpy(init_params[0])
+ names.pop(0)
+ # Pop position and token embedding arrays
+ init_params.pop(0)
+ init_params.pop(0)
+
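+ # Each serialized name looks like "model/h0/attn/c_attn/w:0" (shape of the names
+ # inferred from the parsing below): the "model/" prefix and ":0" suffix are stripped,
+ # "h0" splits into ("h", 0) -> model.h[0], and the leaf "w"/"g" map to .weight while
+ # "b" maps to .bias.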
+ for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]
+ name = name[6:] # skip "model/"
+ if name[-2:] != ":0":
+ raise ValueError(f"Layer {name} does not end with :0")
+ name = name[:-2]
+ name = name.split("/")
+ pointer = model
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
+ scope_names = re.split(r"(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "g":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "b":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "w":
+ pointer = getattr(pointer, "weight")
+ else:
+ pointer = getattr(pointer, scope_names[0])
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+
+ # Ensure that the pointer and array have compatible shapes.
+ if pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
+
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+ACT_FNS = {"relu": nn.ReLU(), "silu": silu, "gelu": gelu_new, "swish": silu}
+
+
+class Attention(nn.Module):
+ def __init__(self, nx, n_positions, config, scale=False):
+ super().__init__()
+ n_state = nx # in Attention: n_state=768 (nx=n_embd)
+ # [switch nx => n_state from Block to Attention to keep identical to TF implementation]
+ if n_state % config.n_head != 0:
+ raise ValueError(f"Attention n_state shape: {n_state} must be divisible by config.n_head {config.n_head}")
+ self.register_buffer(
+ "bias",
+ torch.tril(torch.ones(n_positions, n_positions)).view(1, 1, n_positions, n_positions),
+ persistent=False,
+ )
+ self.n_head = config.n_head
+ self.split_size = n_state
+ self.scale = scale
+
+ self.c_attn = Conv1D(n_state * 3, nx)
+ self.c_proj = Conv1D(n_state, nx)
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
+ )
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
+ # Prune conv1d layers
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
+ # Update hyper params
+ self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
+ self.n_head = self.n_head - len(heads)
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
+ w = torch.matmul(q, k)
+ if self.scale:
+ w = w / math.sqrt(v.size(-1))
+ # w = w * self.bias + -1e9 * (1 - self.bias) # TF implementation method: mask_attn_weights
+ # XD: self.b may be larger than w, so we need to crop it
+ b = self.bias[:, :, : w.size(-2), : w.size(-1)]
+ w = w * b + -1e4 * (1 - b)
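+ # b is 1 on and below the diagonal, so past/current scores pass through while
+ # future positions are pushed to about -1e4 and vanish after the softmax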
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ w = w + attention_mask
+
+ w = nn.functional.softmax(w, dim=-1)
+ w = self.attn_dropout(w)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ w = w * head_mask
+
+ outputs = [torch.matmul(w, v)]
+ if output_attentions:
+ outputs.append(w)
+ return outputs
+
+ def merge_heads(self, x):
+ x = x.permute(0, 2, 1, 3).contiguous()
+ new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
+ return x.view(*new_x_shape) # in Tensorflow implementation: fct merge_states
+
+ def split_heads(self, x, k=False):
+ new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
+ x = x.view(*new_x_shape) # in Tensorflow implementation: fct split_states
+ if k:
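+ # keys are laid out as (batch, head, head_dim, seq) so _attn can compute
+ # torch.matmul(q, k) without an extra transpose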
+ return x.permute(0, 2, 3, 1)
+ else:
+ return x.permute(0, 2, 1, 3)
+
+ def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
+ x = self.c_attn(x)
+ query, key, value = x.split(self.split_size, dim=2)
+ query = self.split_heads(query)
+ key = self.split_heads(key, k=True)
+ value = self.split_heads(value)
+
+ attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
+ a = attn_outputs[0]
+
+ a = self.merge_heads(a)
+ a = self.c_proj(a)
+ a = self.resid_dropout(a)
+
+ outputs = [a] + attn_outputs[1:]
+ return outputs # a, (attentions)
+
+
+class MLP(nn.Module):
+ def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
+ super().__init__()
+ nx = config.n_embd
+ self.c_fc = Conv1D(n_state, nx)
+ self.c_proj = Conv1D(nx, n_state)
+ self.act = ACT_FNS[config.afn]
+ self.dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(self, x):
+ h = self.act(self.c_fc(x))
+ h2 = self.c_proj(h)
+ return self.dropout(h2)
+
+
+class Block(nn.Module):
+ def __init__(self, n_positions, config, scale=False):
+ super().__init__()
+ nx = config.n_embd
+ self.attn = Attention(nx, n_positions, config, scale)
+ self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
+ self.mlp = MLP(4 * nx, config)
+ self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
+
+ def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
+ attn_outputs = self.attn(
+ x,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ a = attn_outputs[0]
+
+ n = self.ln_1(x + a)
+ m = self.mlp(n)
+ h = self.ln_2(n + m)
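+ # post-layer-norm ordering: LayerNorm is applied after each residual add,
+ # unlike the pre-norm placement used in GPT-2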
+
+ outputs = [h] + attn_outputs[1:]
+ return outputs
+
+
+class OpenAIGPTPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = OpenAIGPTConfig
+ load_tf_weights = load_tf_weights_in_openai_gpt
+ base_model_prefix = "transformer"
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear, Conv1D)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+@dataclass
+class OpenAIGPTDoubleHeadsModelOutput(ModelOutput):
+ """
+ Base class for outputs of models predicting if two sentences are consecutive or not.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
+ Multiple choice classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
+ Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ mc_loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ mc_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+OPENAI_GPT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`OpenAIGPTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+OPENAI_GPT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
+ OPENAI_GPT_START_DOCSTRING,
+)
+class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
+ self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
+ self.drop = nn.Dropout(config.embd_pdrop)
+ self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)])
+
+ self.register_buffer("position_ids", torch.arange(config.n_positions), persistent=False)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.tokens_embed
+
+ def set_input_embeddings(self, new_embeddings):
+ self.tokens_embed = new_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
+ """
+ for layer, heads in heads_to_prune.items():
+ self.h[layer].attn.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if position_ids is None:
+ # Code differs from when we had a single embedding matrix for position and token embeddings
+ position_ids = self.position_ids[None, : input_shape[-1]]
+
+ # Attention mask.
+ if attention_mask is not None:
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is simpler than the triangular masking of causal attention
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+ attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and the dtype's smallest value for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.tokens_embed(input_ids)
+ position_embeds = self.positions_embed(position_ids)
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
+ token_type_embeds = self.tokens_embed(token_type_ids)
+ else:
+ token_type_embeds = 0
+ hidden_states = inputs_embeds + position_embeds + token_type_embeds
+ hidden_states = self.drop(hidden_states)
+
+ output_shape = input_shape + (hidden_states.size(-1),)
+
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ for i, block in enumerate(self.h):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions=output_attentions)
+ hidden_states = outputs[0]
+ if output_attentions:
+ all_attentions = all_attentions + (outputs[1],)
+
+ hidden_states = hidden_states.view(*output_shape)
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
+ embeddings).
+ """,
+ OPENAI_GPT_START_DOCSTRING,
+)
+class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = OpenAIGPTModel(config)
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ lm_logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutput(
+ loss=loss,
+ logits=lm_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]:
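+ # GPT-1 exposes no past key/value cache, so only input_ids are passed and the
+ # full sequence is re-encoded at each generation step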
+ return {"input_ids": input_ids}
+
+
+@add_start_docstrings(
+ """
+OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top, e.g. for
+RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
+input embeddings; the classification head takes as input the hidden state at a specified classification token index
+in the input sequence.
+""",
+ OPENAI_GPT_START_DOCSTRING,
+)
+class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ config.num_labels = 1
+ self.transformer = OpenAIGPTModel(config)
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+ self.multiple_choice_head = SequenceSummary(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=OpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ mc_token_ids: Optional[torch.LongTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ mc_labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], OpenAIGPTDoubleHeadsModelOutput]:
+ r"""
+ mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, defaults to the index of the last token of the input):
+ Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
+ 1]`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are
+ ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
+ where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)
+
+ Return:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, OpenAIGPTDoubleHeadsModel
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
+ >>> model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt")
+ >>> tokenizer.add_special_tokens(
+ ... {"cls_token": "[CLS]"}
+ ... ) # Add a [CLS] to the vocabulary (we should train it also!)
+ >>> model.resize_token_embeddings(len(tokenizer))
+
+ >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
+ >>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
+ >>> mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0) # Batch size 1
+
+ >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
+ >>> lm_logits = outputs.logits
+ >>> mc_logits = outputs.mc_logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+
+ lm_logits = self.lm_head(hidden_states)
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
+
+ lm_loss, mc_loss = None, None
+ if mc_labels is not None:
+ loss_fct = CrossEntropyLoss()
+ mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
+ if labels is not None:
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits, mc_logits) + transformer_outputs[1:]
+ if mc_loss is not None:
+ output = (mc_loss,) + output
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return OpenAIGPTDoubleHeadsModelOutput(
+ loss=lm_loss,
+ mc_loss=mc_loss,
+ logits=lm_logits,
+ mc_logits=mc_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
+ [`OpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal
+ models (e.g. GPT-2) do. Since it does classification on the last token, it needs to know the position of the
+ last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding
+ token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since
+ it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take
+ the last value in each row of the batch).
+ """,
+ OPENAI_GPT_START_DOCSTRING,
+)
+class OpenAIGPTForSequenceClassification(OpenAIGPTPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.transformer = OpenAIGPTModel(config)
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size, sequence_length = input_ids.shape[:2]
+ else:
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ # Ensure the batch size is > 1 if there is no padding.
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
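+                # e.g. input_ids [[7, 8, pad, pad]] -> eq().argmax(-1) gives 2, minus 1 gives
+                # index 1, the last non-pad token; with no pad present argmax returns 0 and the
+                # modulo maps the resulting -1 back to seq_len - 1 (the final position)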
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+
+ pooled_logits = logits[range(batch_size), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=pooled_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
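+
+# A minimal usage sketch for the classification head above (the checkpoint name is
+# illustrative; any OpenAI GPT checkpoint with a matching config would do):
+#
+#     from transformers import AutoTokenizer, OpenAIGPTForSequenceClassification
+#
+#     tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
+#     model = OpenAIGPTForSequenceClassification.from_pretrained("openai-community/openai-gpt")
+#     inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+#     logits = model(**inputs).logits  # (batch_size, num_labels), pooled at the last token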
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_tf_openai.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_tf_openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..b826936c51fbd672c669a5cc6157d042453cfe16
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/modeling_tf_openai.py
@@ -0,0 +1,940 @@
+# coding=utf-8
+# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 OpenAI GPT model."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput, TFSequenceClassifierOutput
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFConv1D,
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ TFSequenceSummary,
+ TFSharedEmbeddings,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_openai import OpenAIGPTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "openai-community/openai-gpt"
+_CONFIG_FOR_DOC = "OpenAIGPTConfig"
+
+
+from ..deprecated._archive_maps import TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TFAttention(keras.layers.Layer):
+ def __init__(self, nx, config, scale=False, **kwargs):
+ super().__init__(**kwargs)
+
+ n_state = nx # in Attention: n_state=768 (nx=n_embd)
+ # [switch nx => n_state from Block to Attention to keep identical to TF implementation]
+ assert (
+ n_state % config.n_head == 0
+ ), f"Hidden dimension {n_state} not dividable by number of heads {config.n_head}"
+ self.n_head = config.n_head
+ self.split_size = n_state
+ self.scale = scale
+ self.output_attentions = config.output_attentions
+
+ self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
+ self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
+ self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)
+ self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)
+ self.n_state = n_state
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ pass
+
+ @staticmethod
+ def causal_attention_mask(nd, ns):
+ """
+ 1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]),
+ -1, ns-nd), but doesn't produce garbage on TPUs.
+ """
+ i = tf.range(nd)[:, None]
+ j = tf.range(ns)
+ m = i >= j - ns + nd
+ return m
+
+ def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False):
+ # q, k, v have shape [batch, heads, sequence, features]
+ w = tf.matmul(q, k, transpose_b=True)
+ if self.scale:
+ dk = tf.cast(shape_list(k)[-1], dtype=w.dtype) # scale attention_scores
+ w = w / tf.math.sqrt(dk)
+
+ # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
+ _, _, nd, ns = shape_list(w)
+ b = tf.cast(self.causal_attention_mask(nd, ns), dtype=w.dtype)
+ b = tf.reshape(b, [1, 1, nd, ns])
+ w = w * b - 1e4 * (1 - b)
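+        # scores at positions allowed by the causal mask are kept; masked positions are
+        # pushed down by ~1e4 so the softmax below assigns them near-zero weight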
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ attention_mask = tf.cast(attention_mask, dtype=w.dtype)
+ w = w + attention_mask
+
+ w = stable_softmax(w, axis=-1)
+ w = self.attn_dropout(w, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ w = w * head_mask
+
+ outputs = [tf.matmul(w, v)]
+ if output_attentions:
+ outputs.append(w)
+ return outputs
+
+ def merge_heads(self, x):
+ x = tf.transpose(x, [0, 2, 1, 3])
+ x_shape = shape_list(x)
+ new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
+ return tf.reshape(x, new_x_shape)
+
+ def split_heads(self, x):
+ x_shape = shape_list(x)
+ new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
+ x = tf.reshape(x, new_x_shape)
+ return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
+
+ def call(self, x, attention_mask, head_mask, output_attentions, training=False):
+ x = self.c_attn(x)
+ query, key, value = tf.split(x, 3, axis=2)
+ query = self.split_heads(query)
+ key = self.split_heads(key)
+ value = self.split_heads(value)
+
+ attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training)
+ a = attn_outputs[0]
+
+ a = self.merge_heads(a)
+ a = self.c_proj(a)
+ a = self.resid_dropout(a, training=training)
+
+ outputs = [a] + attn_outputs[1:]
+ return outputs # a, (attentions)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "c_attn", None) is not None:
+ with tf.name_scope(self.c_attn.name):
+ self.c_attn.build([None, None, self.n_state * 3])
+ if getattr(self, "c_proj", None) is not None:
+ with tf.name_scope(self.c_proj.name):
+ self.c_proj.build([None, None, self.n_state])
+
+
+class TFMLP(keras.layers.Layer):
+ def __init__(self, n_state, config, **kwargs):
+ super().__init__(**kwargs)
+ nx = config.n_embd
+ self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc")
+ self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj")
+ self.act = get_tf_activation("gelu")
+ self.dropout = keras.layers.Dropout(config.resid_pdrop)
+ self.nx = nx
+ self.n_state = n_state
+
+ def call(self, x, training=False):
+ h = self.act(self.c_fc(x))
+ h2 = self.c_proj(h)
+ h2 = self.dropout(h2, training=training)
+ return h2
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "c_fc", None) is not None:
+ with tf.name_scope(self.c_fc.name):
+ self.c_fc.build([None, None, self.n_state])
+ if getattr(self, "c_proj", None) is not None:
+ with tf.name_scope(self.c_proj.name):
+ self.c_proj.build([None, None, self.nx])
+
+
+class TFBlock(keras.layers.Layer):
+ def __init__(self, config, scale=False, **kwargs):
+ super().__init__(**kwargs)
+ nx = config.n_embd
+ self.attn = TFAttention(nx, config, scale, name="attn")
+ self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
+ self.mlp = TFMLP(4 * nx, config, name="mlp")
+ self.ln_2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2")
+ self.nx = nx
+
+ def call(self, x, attention_mask, head_mask, output_attentions, training=False):
+ output_attn = self.attn(x, attention_mask, head_mask, output_attentions, training=training)
+ a = output_attn[0] # output_attn: a, (attentions)
+
+ n = self.ln_1(x + a)
+ m = self.mlp(n, training=training)
+ h = self.ln_2(n + m)
+
+ outputs = [h] + output_attn[1:]
+ return outputs # x, (attentions)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attn", None) is not None:
+ with tf.name_scope(self.attn.name):
+ self.attn.build(None)
+ if getattr(self, "ln_1", None) is not None:
+ with tf.name_scope(self.ln_1.name):
+ self.ln_1.build([None, None, self.nx])
+ if getattr(self, "mlp", None) is not None:
+ with tf.name_scope(self.mlp.name):
+ self.mlp.build(None)
+ if getattr(self, "ln_2", None) is not None:
+ with tf.name_scope(self.ln_2.name):
+ self.ln_2.build([None, None, self.nx])
+
+
+@keras_serializable
+class TFOpenAIGPTMainLayer(keras.layers.Layer):
+ config_class = OpenAIGPTConfig
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(*inputs, **kwargs)
+
+ self.config = config
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+ self.return_dict = config.use_return_dict
+ self.num_hidden_layers = config.n_layer
+ self.n_embd = config.n_embd
+ self.n_positions = config.n_positions
+ self.initializer_range = config.initializer_range
+
+ self.tokens_embed = TFSharedEmbeddings(
+ config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="tokens_embed"
+ )
+ self.drop = keras.layers.Dropout(config.embd_pdrop)
+ self.h = [TFBlock(config, scale=True, name=f"h_._{i}") for i in range(config.n_layer)]
+
+ def build(self, input_shape=None):
+ with tf.name_scope("positions_embed"):
+ self.positions_embed = self.add_weight(
+ name="embeddings",
+ shape=[self.n_positions, self.n_embd],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "tokens_embed", None) is not None:
+ with tf.name_scope(self.tokens_embed.name):
+ self.tokens_embed.build(None)
+ if getattr(self, "h", None) is not None:
+ for layer in self.h:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+ def get_input_embeddings(self):
+ return self.tokens_embed
+
+ def set_input_embeddings(self, value):
+ self.tokens_embed.weight = value
+ self.tokens_embed.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFBaseModelOutput]:
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(input_shape[-1]), axis=0)
+
+ if attention_mask is not None:
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+            # this attention mask is simpler than the triangular masking of causal attention
+            # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+ attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+
+ one_cst = tf.constant(1.0)
+ attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
+ attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
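+            # e.g. a mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0], which is then added to
+            # the raw attention scores inside TFAttention._attn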
+ else:
+ attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.num_hidden_layers
+ # head_mask = tf.constant([0] * self.num_hidden_layers)
+
+ position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = self.tokens_embed(input_ids, mode="embedding")
+ position_embeds = tf.gather(self.positions_embed, position_ids)
+ if token_type_ids is not None:
+ token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
+ check_embeddings_within_bounds(token_type_ids, self.config.vocab_size, "token_type_ids")
+ token_type_embeds = self.tokens_embed(token_type_ids, mode="embedding")
+ else:
+ token_type_embeds = 0
+ hidden_states = inputs_embeds + position_embeds + token_type_embeds
+ hidden_states = self.drop(hidden_states, training=training)
+
+ output_shape = input_shape + [shape_list(hidden_states)[-1]]
+
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ for i, block in enumerate(self.h):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
+
+ outputs = block(
+ hidden_states,
+ attention_mask,
+ head_mask[i],
+ output_attentions,
+ training=training,
+ )
+ hidden_states = outputs[0]
+ if output_attentions:
+ all_attentions = all_attentions + (outputs[1],)
+
+ hidden_states = tf.reshape(hidden_states, output_shape)
+ # Add last hidden state
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if output_attentions:
+ # let the number of heads free (-1) so we can extract attention even after head pruning
+ attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
+ all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ )
+
+
+class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = OpenAIGPTConfig
+ base_model_prefix = "transformer"
+
+
+@dataclass
+class TFOpenAIGPTDoubleHeadsModelOutput(ModelOutput):
+ """
+    Base class for outputs of [`TFOpenAIGPTDoubleHeadsModel`], which combines a language modeling head with a
+    multiple-choice classification head.
+
+ Args:
+ logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`):
+ Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ logits: tf.Tensor = None
+ mc_logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+OPENAI_GPT_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`OpenAIGPTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+OPENAI_GPT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
+ OPENAI_GPT_START_DOCSTRING,
+)
+class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFBaseModelOutput]:
+ outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+
+
+@add_start_docstrings(
+ """
+ OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
+ embeddings).
+ """,
+ OPENAI_GPT_START_DOCSTRING,
+)
+class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelingLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
+ # OpenAIGPT does not have past caching features
+ self.supports_xla_generation = False
+
+ def get_output_embeddings(self):
+ return self.get_input_embeddings()
+
+ def set_output_embeddings(self, value):
+ self.set_input_embeddings(value)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFCausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFCausalLMOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the cross entropy language modeling loss. Indices should be in `[0, ...,
+ config.vocab_size - 1]`.
+ """
+
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = transformer_outputs[0]
+
+ logits = self.transformer.tokens_embed(hidden_states, mode="linear")
+
+ loss = None
+ if labels is not None:
+ # shift labels to the left and cut last logit token
+ shifted_logits = logits[:, :-1]
+ labels = labels[:, 1:]
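+            # e.g. for tokens [t0, t1, t2], logits at positions 0..1 are scored against
+            # labels t1..t2, i.e. next-token prediction with the final logit dropped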
+ loss = self.hf_compute_loss(labels, shifted_logits)
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFCausalLMOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(self, inputs, **kwargs):
+ return {"input_ids": inputs}
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+
+
+@add_start_docstrings(
+ """
+    OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
+    RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
+    input embeddings; the classification head takes as input the hidden state at a specified classification token
+    index in the input sequence.
+ """,
+ OPENAI_GPT_START_DOCSTRING,
+)
+class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ config.num_labels = 1
+ self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
+ self.multiple_choice_head = TFSequenceSummary(
+ config, initializer_range=config.initializer_range, name="multiple_choice_head"
+ )
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFOpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ mc_token_ids: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFOpenAIGPTDoubleHeadsModelOutput]:
+ r"""
+        mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, defaults to the index of the last token of the input):
+ Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
+ 1]`.
+
+ Return:
+
+ Examples:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import AutoTokenizer, TFOpenAIGPTDoubleHeadsModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
+ >>> model = TFOpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt")
+
+ >>> # Add a [CLS] to the vocabulary (we should train it also!)
+ >>> tokenizer.add_special_tokens({"cls_token": "[CLS]"})
+ >>> model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
+    >>> print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary
+
+ >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
+ >>> encoding = tokenizer(choices, return_tensors="tf")
+ >>> inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}
+ >>> inputs["mc_token_ids"] = tf.constant(
+ ... [inputs["input_ids"].shape[-1] - 1, inputs["input_ids"].shape[-1] - 1]
+ ... )[
+ ... None, :
+ ... ] # Batch size 1
+ >>> outputs = model(inputs)
+ >>> lm_prediction_scores, mc_prediction_scores = outputs[:2]
+ ```"""
+
+ if input_ids is not None:
+ input_shapes = shape_list(input_ids)
+ else:
+ input_shapes = shape_list(inputs_embeds)[:-1]
+
+ seq_length = input_shapes[-1]
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
+ transformer_outputs = self.transformer(
+ flat_input_ids,
+ flat_attention_mask,
+ flat_token_type_ids,
+ flat_position_ids,
+ head_mask,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = transformer_outputs[0]
+ hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
+ if return_dict and output_hidden_states:
+ # We do this to match the slightly odd PT behaviour - the final hidden state is reshaped to rank 4 when the
+ # input is rank 3, but all other hidden states remain at rank-3 (with the first 2 dims merged)
+ all_hidden_states = transformer_outputs.hidden_states[:-1] + (hidden_states,)
+ else:
+ all_hidden_states = None
+ lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training)
+ mc_logits = tf.squeeze(mc_logits, axis=-1)
+
+ if not return_dict:
+ return (lm_logits, mc_logits) + transformer_outputs[1:]
+
+ return TFOpenAIGPTDoubleHeadsModelOutput(
+ logits=lm_logits,
+ mc_logits=mc_logits,
+ hidden_states=all_hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ @property
+ def input_signature(self):
+ return {
+ "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
+ "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
+ "mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
+ }
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "multiple_choice_head", None) is not None:
+ with tf.name_scope(self.multiple_choice_head.name):
+ self.multiple_choice_head.build(None)
+
+
+@add_start_docstrings(
+ """
+ The OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
+
+ [`TFOpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal
+ models (e.g. GPT-2) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
+    each row of the batch).
+ """,
+ OPENAI_GPT_START_DOCSTRING,
+)
+class TFOpenAIGPTForSequenceClassification(TFOpenAIGPTPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+ self.score = keras.layers.Dense(
+ config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="score",
+ use_bias=False,
+ )
+ self.transformer = TFOpenAIGPTMainLayer(config, name="transformer")
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFSequenceClassifierOutput]:
+ r"""
+        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`.
+ """
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+ in_logits = None
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ sequence_lengths = (
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
+ - 1
+ )
+ sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
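+                # e.g. input_ids [[7, 8, pad, pad]] -> argmax over the pad positions gives 2,
+                # minus 1 gives 1 (the last non-pad token); rows with no pad yield -1 and the
+                # tf.where above remaps them to the final position seq_len - 1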
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+ loss = None
+
+ if labels is not None:
+ if input_ids is not None:
+ batch_size, sequence_length = shape_list(input_ids)[:2]
+ else:
+ batch_size, sequence_length = shape_list(inputs_embeds)[:2]
+ assert (
+ self.config.pad_token_id is not None or batch_size == 1
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
+
+ if not tf.is_tensor(sequence_lengths):
+ in_logits = logits[0:batch_size, sequence_lengths]
+
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
+
+ pooled_logits = in_logits if in_logits is not None else logits
+
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=pooled_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "score", None) is not None:
+ with tf.name_scope(self.score.name):
+ self.score.build([None, None, self.config.n_embd])
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f2b27916092b2d11cae2955a0179218ac9c9de6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai.py
@@ -0,0 +1,394 @@
+# coding=utf-8
+# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for OpenAI GPT."""
+
+
+import json
+import os
+import re
+import unicodedata
+from typing import Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+}
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+            never_split (`List[str]`, *optional*):
+                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+        # and generally don't have any Chinese data in them (there are Chinese
+        # characters in the vocabulary because Wikipedia does have some Chinese
+        # words in the English Wikipedia).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+def get_pairs(word):
+ """
+    Return the set of symbol pairs in a word. A word is represented as a tuple of symbols (symbols being
+    variable-length strings).
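+
+    e.g. get_pairs(("l", "o", "w</w>")) returns {("l", "o"), ("o", "w</w>")}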
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+def text_standardize(text):
+ """
+    Fixes some issues the spacy tokenizer had on the BooksCorpus dataset, and also does some whitespace
+    standardization.
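+
+    e.g. text_standardize("don't — stop…") returns "don't - stop..."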
+ """
+ text = text.replace("—", "-")
+ text = text.replace("–", "-")
+ text = text.replace("―", "-")
+ text = text.replace("…", "...")
+ text = text.replace("´", "'")
+ text = re.sub(r"""(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)""", r" \1 ", text)
+ text = re.sub(r"\s*\n\s*", " \n ", text)
+ text = re.sub(r"[^\S\n]+", " ", text)
+ return text.strip()
+
+
+class OpenAIGPTTokenizer(PreTrainedTokenizer):
+ """
+ Construct a GPT Tokenizer. Based on Byte-Pair-Encoding with the following peculiarities:
+
+ - lowercases all inputs,
+    - uses `SpaCy` tokenizer and `ftfy` for pre-BPE tokenization if they are installed, falling back to BERT's
+      `BasicTokenizer` if not.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
+ try:
+ import ftfy
+ from spacy.lang.en import English
+
+ _nlp = English()
+ self.nlp = _nlp.tokenizer
+ self.fix_text = ftfy.fix_text
+ except ImportError:
+ logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.")
+ self.nlp = BasicTokenizer(do_lower_case=True)
+ self.fix_text = None
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ merges = merges_handle.read().split("\n")[1:-1]
+ merges = [tuple(merge.split()) for merge in merges]
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {}
+
+ super().__init__(unk_token=unk_token, **kwargs)
+
+ @property
+ def do_lower_case(self):
+ return True
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def bpe(self, token):
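+        # Greedy BPE: start from the token's characters with "</w>" appended to the last
+        # one, repeatedly merge the adjacent pair with the lowest merge rank, and return
+        # the remaining symbols joined by spaces.
+        # e.g. with ranked merges ("l", "o") then ("lo", "w</w>"), bpe("low") -> "low</w>"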
+        word = tuple(token[:-1]) + (token[-1] + "</w>",)
+ if token in self.cache:
+ return self.cache[token]
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token + ""
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+        if word == "\n  </w>":
+            word = "\n</w>"
+ self.cache[token] = word
+ return word
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ split_tokens = []
+ if self.fix_text is None:
+ # Using BERT's BasicTokenizer
+ text = self.nlp.tokenize(text)
+ for token in text:
+ split_tokens.extend(list(self.bpe(token).split(" ")))
+ else:
+ # Using SpaCy & ftfy (original tokenization process of OpenAI GPT)
+ text = self.nlp(text_standardize(self.fix_text(text)))
+ for token in text:
+ split_tokens.extend(list(self.bpe(token.text.lower()).split(" ")))
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an id in a token (BPE) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = "".join(tokens).replace("", " ").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai_fast.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..214db5385044eb8de3518fe379b0f766d8392350
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/openai/tokenization_openai_fast.py
@@ -0,0 +1,64 @@
+# coding=utf-8
+# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Tokenization classes for OpenAI GPT."""
+
+
+from typing import Optional, Tuple
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_openai import OpenAIGPTTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
+
+
+class OpenAIGPTTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" GPT Tokenizer (backed by HuggingFace's *tokenizers* library). Based on Byte-Pair-Encoding with
+ the following peculiarities:
+
+    - lowercases all inputs
+ - uses BERT's BasicTokenizer for pre-BPE tokenization
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = OpenAIGPTTokenizer
+
+    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", **kwargs):
+ super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, **kwargs)
+
+ @property
+ def do_lower_case(self):
+ return True
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f083b454d554a09ef2c0479ef7ae7053cc6e023c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__init__.py
@@ -0,0 +1,62 @@
+# Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_qwen2_moe": ["QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "Qwen2MoeConfig"],
+}
+
+
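+# The torch-backed modeling classes below are registered lazily, so importing this
+# package (e.g. just for the configuration) works even when torch is not installed.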
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_qwen2_moe"] = [
+ "Qwen2MoeForCausalLM",
+ "Qwen2MoeModel",
+ "Qwen2MoePreTrainedModel",
+ "Qwen2MoeForSequenceClassification",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_qwen2_moe import QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, Qwen2MoeConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_qwen2_moe import (
+ Qwen2MoeForCausalLM,
+ Qwen2MoeForSequenceClassification,
+ Qwen2MoeModel,
+ Qwen2MoePreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..feeb5adc8edb0e3d452277f36d41e45c619fbaec
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/configuration_qwen2_moe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/configuration_qwen2_moe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b7e2317210b0911b5ee2df684818cfe56ddd01b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/configuration_qwen2_moe.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/modeling_qwen2_moe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/modeling_qwen2_moe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b47eaa674190b94f5f7177d9eeb570cab2e176d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/modeling_qwen2_moe.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/configuration_qwen2_moe.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/configuration_qwen2_moe.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3f516ed9c2de4d725b3f8f329768ef71916cb62
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/configuration_qwen2_moe.py
@@ -0,0 +1,175 @@
+# coding=utf-8
+# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Qwen2MoE model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "Qwen/Qwen1.5-MoE-A2.7B": "https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B/resolve/main/config.json",
+}
+
+
+class Qwen2MoeConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Qwen2MoeModel`]. It is used to instantiate a
+ Qwen2MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of
+ Qwen1.5-MoE-A2.7B" [Qwen/Qwen1.5-MoE-A2.7B"](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B").
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 151936):
+            Vocabulary size of the Qwen2MoE model. Defines the number of different tokens that can be represented by
+            the `input_ids` passed when calling [`Qwen2MoeModel`].
+ hidden_size (`int`, *optional*, defaults to 2048):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 5632):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+            Number of hidden layers in the Transformer decoder.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+            Number of attention heads for each attention layer in the Transformer decoder.
+ num_key_value_heads (`int`, *optional*, defaults to 16):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `16`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
+ The maximum sequence length that this model might ever be used with.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether the model's input and output word embeddings should be tied.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
+ Whether to use sliding window attention.
+ sliding_window (`int`, *optional*, defaults to 4096):
+ Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+ max_window_layers (`int`, *optional*, defaults to 28):
+ The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ decoder_sparse_step (`int`, *optional*, defaults to 1):
+            The frequency of the MoE layer: every `decoder_sparse_step`-th decoder layer uses a sparse MoE block in
+            place of the dense MLP.
+ moe_intermediate_size (`int`, *optional*, defaults to 1408):
+ Intermediate size of the routed expert.
+ shared_expert_intermediate_size (`int`, *optional*, defaults to 5632):
+ Intermediate size of the shared expert.
+ num_experts_per_tok (`int`, *optional*, defaults to 4):
+ Number of selected experts.
+ num_experts (`int`, *optional*, defaults to 60):
+ Number of routed experts.
+ norm_topk_prob (`bool`, *optional*, defaults to `False`):
+ Whether to normalize the topk probabilities.
+ output_router_logits (`bool`, *optional*, defaults to `False`):
+            Whether or not the router logits should be returned by the model. Enabling this will also
+ allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+ The aux loss factor for the total loss.
+
+ ```python
+ >>> from transformers import Qwen2MoeModel, Qwen2MoeConfig
+
+ >>> # Initializing a Qwen2MoE style configuration
+ >>> configuration = Qwen2MoeConfig()
+
+    >>> # Initializing a model from the Qwen1.5-MoE-A2.7B style configuration
+ >>> model = Qwen2MoeModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
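+
+    >>> # An illustrative smaller MoE variant (hypothetical sizes, not a released checkpoint)
+    >>> tiny_configuration = Qwen2MoeConfig(num_experts=8, num_experts_per_tok=2, moe_intermediate_size=512)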
+ ```"""
+
+ model_type = "qwen2_moe"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=151936,
+ hidden_size=2048,
+ intermediate_size=5632,
+ num_hidden_layers=24,
+ num_attention_heads=16,
+ num_key_value_heads=16,
+ hidden_act="silu",
+ max_position_embeddings=32768,
+ initializer_range=0.02,
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ tie_word_embeddings=False,
+ rope_theta=10000.0,
+ use_sliding_window=False,
+ sliding_window=4096,
+ max_window_layers=28,
+ attention_dropout=0.0,
+ decoder_sparse_step=1,
+ moe_intermediate_size=1408,
+ shared_expert_intermediate_size=5632,
+ num_experts_per_tok=4,
+ num_experts=60,
+ norm_topk_prob=False,
+ output_router_logits=False,
+ router_aux_loss_coef=0.001,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.use_sliding_window = use_sliding_window
+ self.sliding_window = sliding_window
+ self.max_window_layers = max_window_layers
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.attention_dropout = attention_dropout
+
+ # MoE arguments
+ self.decoder_sparse_step = decoder_sparse_step
+ self.moe_intermediate_size = moe_intermediate_size
+ self.shared_expert_intermediate_size = shared_expert_intermediate_size
+ self.num_experts_per_tok = num_experts_per_tok
+ self.num_experts = num_experts
+ self.norm_topk_prob = norm_topk_prob
+ self.output_router_logits = output_router_logits
+ self.router_aux_loss_coef = router_aux_loss_coef
+
+ super().__init__(
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/modeling_qwen2_moe.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/modeling_qwen2_moe.py
new file mode 100644
index 0000000000000000000000000000000000000000..70072c91720a57f44613e404757003973e2b73db
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/qwen2_moe/modeling_qwen2_moe.py
@@ -0,0 +1,1595 @@
+# coding=utf-8
+# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Qwen2MoE model."""
+import inspect
+import math
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache
+from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
+from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast, SequenceClassifierOutputWithPast
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_qwen2_moe import Qwen2MoeConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "Qwen/Qwen1.5-MoE-A2.7B"
+_CONFIG_FOR_DOC = "Qwen2MoeConfig"
+
+QWEN2MOE_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "Qwen/Qwen1.5-MoE-A2.7B",
+ # See all Qwen2 models at https://huggingface.co/models?filter=qwen2
+]
+
+
+# Copied from transformers.models.mixtral.modeling_mixtral.load_balancing_loss_func
+def load_balancing_loss_func(
+    gate_logits: torch.Tensor, num_experts: Optional[int] = None, top_k: int = 2, attention_mask: Optional[torch.Tensor] = None
+) -> Union[torch.Tensor, int]:
+ r"""
+    Computes auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.
+
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+ experts is too unbalanced.
+
+ Args:
+        gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]]):
+            Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
+            shape [batch_size X sequence_length, num_experts].
+        attention_mask (`torch.Tensor`, *optional*):
+            The attention_mask used in the forward function, of
+            shape [batch_size X sequence_length] if not None.
+ num_experts (`int`, *optional*):
+ Number of experts
+
+ Returns:
+ The auxiliary loss.
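+
+    Example (an illustrative sketch with toy shapes; not part of the original docstring):
+
+    ```python
+    >>> import torch
+
+    >>> # 2 layers, batch_size * sequence_length = 4 tokens, 8 experts, top-2 routing
+    >>> gate_logits = tuple(torch.randn(4, 8) for _ in range(2))
+    >>> loss = load_balancing_loss_func(gate_logits, num_experts=8, top_k=2)
+    >>> loss.shape
+    torch.Size([])
+    ```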
+ """
+ if gate_logits is None or not isinstance(gate_logits, tuple):
+ return 0
+
+ if isinstance(gate_logits, tuple):
+ compute_device = gate_logits[0].device
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+
+ if attention_mask is None:
+        # Compute the percentage of tokens routed to each expert
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
+ else:
+ batch_size, sequence_length = attention_mask.shape
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
+
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
+ expert_attention_mask = (
+ attention_mask[None, :, :, None, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
+ .reshape(-1, top_k, num_experts)
+ .to(compute_device)
+ )
+
+        # Compute the percentage of tokens routed to each expert
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
+ expert_attention_mask, dim=0
+ )
+
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
+ router_per_expert_attention_mask = (
+ attention_mask[None, :, :, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
+ .reshape(-1, num_experts)
+ .to(compute_device)
+ )
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
+ router_per_expert_attention_mask, dim=0
+ )
+
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+ return overall_loss * num_experts
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
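+    # From a 2D padding mask, recover the flattened indices of the non-padding tokens,
+    # the cumulative per-sequence lengths expected by the flash-attn varlen kernels,
+    # and the longest unpadded sequence in the batch.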
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2Moe
+class Qwen2MoeRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ Qwen2MoeRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
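+        # Computed in float32 for numerical stability, then cast back to the input dtype:
+        # y = weight * x / sqrt(mean(x**2, dim=-1) + eps)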
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
+
+
+# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2Moe
+class Qwen2MoeRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+ )
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+ def forward(self, x, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if seq_len > self.max_seq_len_cached:
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+ return (
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
+ )
+
+
+# Copied from transformers.models.llama.modeling_llama.rotate_half
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`):
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+ used to pass offsetted position ids when working with a KV-cache.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
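+
+    Example (illustrative toy shapes; not part of the original docstring):
+
+    ```python
+    >>> import torch
+
+    >>> q = torch.randn(1, 2, 4, 8)  # [batch_size, num_heads, seq_len, head_dim]
+    >>> k = torch.randn(1, 2, 4, 8)
+    >>> rope = Qwen2MoeRotaryEmbedding(dim=8)
+    >>> cos, sin = rope(q, seq_len=4)
+    >>> q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, torch.arange(4).unsqueeze(0))
+    >>> q_rot.shape
+    torch.Size([1, 2, 4, 8])
+    ```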
+ """
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
+
+# Modified from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2Moe
+class Qwen2MoeMLP(nn.Module):
+ def __init__(self, config, intermediate_size=None):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, x):
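+        # Gated MLP (SwiGLU when `hidden_act` is "silu"): down(act(gate(x)) * up(x))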
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+
+# Copied from transformers.models.llama.modeling_llama.repeat_kv
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
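+
+    Example (illustrative; not part of the original docstring):
+
+    ```python
+    >>> import torch
+
+    >>> kv = torch.randn(1, 2, 5, 4)  # 2 key/value heads
+    >>> repeat_kv(kv, n_rep=3).shape  # expanded to 6 attention heads
+    torch.Size([1, 6, 5, 4])
+    ```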
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+# Copied from transformers.models.qwen2.modeling_qwen2.Qwen2Attention with Qwen2->Qwen2Moe
+class Qwen2MoeAttention(nn.Module):
+ """
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
+ and "Generating Long Sequences with Sparse Transformers".
+ """
+
+ def __init__(self, config: Qwen2MoeConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
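+        # groups == 1 means vanilla multi-head attention; num_key_value_heads == 1 would be MQA.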
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+ self.is_causal = True
+ self.attention_dropout = config.attention_dropout
+
+ if (self.head_dim * self.num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+
+ self.rotary_emb = Qwen2MoeRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
+ )
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # repeat k/v heads if n_kv_heads < n_heads
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
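+        # Eager attention: raw scores QK^T / sqrt(head_dim); softmax (in float32) and dropout follow below.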
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+
+ attn_weights = attn_weights + attention_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+# Copied from transformers.models.qwen2.modeling_qwen2.Qwen2FlashAttention2 with Qwen2->Qwen2Moe
+class Qwen2MoeFlashAttention2(Qwen2MoeAttention):
+ """
+ Qwen2Moe flash attention module, following Qwen2Moe attention module. This module inherits from `Qwen2MoeAttention`
+    as the weights of the module stay untouched. The only required change would be on the forward pass
+ where it needs to correctly call the public API of flash attention and deal with padding tokens
+ in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
+ config.max_window_layers layers.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ):
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
+ )
+
+ # overwrite attention_mask with padding_mask
+ attention_mask = kwargs.pop("padding_mask")
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
+
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ use_sliding_windows = (
+ _flash_supports_window_size
+ and getattr(self.config, "sliding_window", None) is not None
+ and kv_seq_len > self.config.sliding_window
+ and self.config.use_sliding_window
+ )
+
+ if not _flash_supports_window_size:
+ logger.warning_once(
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
+ " make sure to upgrade flash-attn library."
+ )
+
+ if past_key_value is not None:
+            # Activate slicing cache only if the config has a `sliding_window` attribute
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
+ if (
+ getattr(self.config, "sliding_window", None) is not None
+ and kv_seq_len > self.config.sliding_window
+ and cache_has_contents
+ ):
+ slicing_tokens = 1 - self.config.sliding_window
+
+ past_key = past_key_value[self.layer_idx][0]
+ past_value = past_key_value[self.layer_idx][1]
+
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
+
+ if past_key.shape[-2] != self.config.sliding_window - 1:
+ raise ValueError(
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
+ f" {past_key.shape}"
+ )
+
+ if attention_mask is not None:
+ attention_mask = attention_mask[:, slicing_tokens:]
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
+
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # repeat k/v heads if n_kv_heads < n_heads
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
+
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
+ # cast them back in float16 just to be sure everything works as expected.
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+        # Reshape to the expected shape for Flash Attention
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ attn_output = self._flash_attention_forward(
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ q_len,
+ dropout=dropout_rate,
+ use_sliding_windows=use_sliding_windows,
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ def _flash_attention_forward(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ query_length,
+ dropout=0.0,
+ softmax_scale=None,
+ use_sliding_windows=False,
+ ):
+ """
+        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+        first unpad the input, then compute the attention scores and pad the final attention scores.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
+ use_sliding_windows (`bool`, *optional*):
+ Whether to activate sliding window attention.
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Decide whether to use SWA or not by layer index.
+ if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
+ use_sliding_windows = False
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ if not use_sliding_windows:
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+ else:
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ window_size=(self.config.sliding_window, self.config.sliding_window),
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ if not use_sliding_windows:
+ attn_output = flash_attn_func(
+ query_states,
+ key_states,
+ value_states,
+ dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+ else:
+ attn_output = flash_attn_func(
+ query_states,
+ key_states,
+ value_states,
+ dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ window_size=(self.config.sliding_window, self.config.sliding_window),
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
+
+ # On the first iteration we need to properly re-create the padding mask
+ # by slicing it on the proper place
+ if kv_seq_len != attention_mask.shape[-1]:
+ attention_mask_num_tokens = attention_mask.shape[-1]
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
+
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
+
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+# Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Qwen2Moe
+class Qwen2MoeSdpaAttention(Qwen2MoeAttention):
+ """
+ Qwen2Moe attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+    `Qwen2MoeAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
+ SDPA API.
+ """
+
+ # Adapted from Qwen2MoeAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "Qwen2MoeModel is using Qwen2MoeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and attention_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=attention_mask,
+ dropout_p=self.attention_dropout if self.training else 0.0,
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+
+QWEN2MOE_ATTENTION_CLASSES = {
+ "eager": Qwen2MoeAttention,
+ "flash_attention_2": Qwen2MoeFlashAttention2,
+ "sdpa": Qwen2MoeSdpaAttention,
+}
+
+
+class Qwen2MoeSparseMoeBlock(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.num_experts = config.num_experts
+ self.top_k = config.num_experts_per_tok
+ self.norm_topk_prob = config.norm_topk_prob
+
+ # gating
+ self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
+ self.experts = nn.ModuleList(
+ [Qwen2MoeMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(self.num_experts)]
+ )
+
+ self.shared_expert = Qwen2MoeMLP(config, intermediate_size=config.shared_expert_intermediate_size)
+ self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False)
+
+    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+        """
+        Route each token to its top-k experts, mix the expert outputs with the routing weights, and add the
+        always-active shared expert. Returns the combined hidden states together with the raw router logits.
+        """
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
+ hidden_states = hidden_states.view(-1, hidden_dim)
+ # router_logits: (batch * sequence_length, n_experts)
+ router_logits = self.gate(hidden_states)
+
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
+ if self.norm_topk_prob:
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
+ # we cast back to the input dtype
+ routing_weights = routing_weights.to(hidden_states.dtype)
+
+ final_hidden_states = torch.zeros(
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
+ )
+
+ # One hot encode the selected experts to create an expert mask
+        # this will be used to easily index which expert is going to be solicited
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
+
+ # Loop over all available experts in the model and perform the computation on each expert
+ for expert_idx in range(self.num_experts):
+ expert_layer = self.experts[expert_idx]
+ idx, top_x = torch.where(expert_mask[expert_idx])
+
+ # Index the correct hidden states and compute the expert hidden state for
+ # the current expert. We need to make sure to multiply the output hidden
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
+
+        # However `index_add_` only supports torch tensors for indexing so we'll use
+ # the `top_x` tensor here.
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
+
+ shared_expert_output = self.shared_expert(hidden_states)
+ shared_expert_output = F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output
+
+ final_hidden_states = final_hidden_states + shared_expert_output
+
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
+ return final_hidden_states, router_logits
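+
+    # Illustrative sketch of the routing (hypothetical tiny config, not from the original source):
+    #
+    #   config = Qwen2MoeConfig(hidden_size=64, num_experts=4, num_experts_per_tok=2,
+    #                           moe_intermediate_size=32, shared_expert_intermediate_size=64)
+    #   block = Qwen2MoeSparseMoeBlock(config)
+    #   out, router_logits = block(torch.randn(2, 5, 64))   # out: (2, 5, 64), router_logits: (10, 4)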
+
+
+class Qwen2MoeDecoderLayer(nn.Module):
+ def __init__(self, config: Qwen2MoeConfig, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.self_attn = QWEN2MOE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
+
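+        # Every `decoder_sparse_step`-th layer (and only if experts are configured) gets a sparse
+        # MoE block; the remaining layers fall back to a dense MLP.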
+ if config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0:
+ self.mlp = Qwen2MoeSparseMoeBlock(config)
+ else:
+ self.mlp = Qwen2MoeMLP(config, intermediate_size=config.intermediate_size)
+
+ self.input_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ output_router_logits: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ **kwargs,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
+ "Please make sure use `attention_mask` instead.`"
+ )
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
+ and should not be returned during inference.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+
+ hidden_states = self.mlp(hidden_states)
+ if isinstance(hidden_states, tuple):
+ hidden_states, router_logits = hidden_states
+ else:
+ router_logits = None
+
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ if output_router_logits:
+ outputs += (router_logits,)
+
+ return outputs
+
+
+QWEN2MOE_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`Qwen2MoeConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.",
+ QWEN2MOE_START_DOCSTRING,
+)
+class Qwen2MoePreTrainedModel(PreTrainedModel):
+ config_class = Qwen2MoeConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["Qwen2MoeDecoderLayer"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_cache_class = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+QWEN2MOE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+            blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance;
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
+ should not be returned during inference.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.",
+ QWEN2MOE_START_DOCSTRING,
+)
+class Qwen2MoeModel(Qwen2MoePreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2MoeDecoderLayer`]
+
+ Args:
+ config: Qwen2MoeConfig
+ """
+
+ def __init__(self, config: Qwen2MoeConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [Qwen2MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self._attn_implementation = config._attn_implementation
+ self.norm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ past_key_values_length = 0
+
+ if use_cache:
+ use_legacy_cache = not isinstance(past_key_values, Cache)
+ if use_legacy_cache:
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
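+            # The legacy cache format is a per-layer tuple of (key, value) tensors of
+            # shape (batch_size, num_heads, seq_len, head_dim); DynamicCache wraps it so
+            # that get_usable_length() can account for the tokens already cached.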
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
+ else:
+ position_ids = position_ids.view(-1, seq_length).long()
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
+ if is_padding_right:
+ raise ValueError(
+ "You are attempting to perform batched generation with padding_side='right'"
+ " this may lead to unexpected behaviour for Flash Attention version of Qwen2MoE. Make sure to "
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
+ )
+
+ if self._attn_implementation == "flash_attention_2":
+ # 2d mask is passed through the layers
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ elif self._attn_implementation == "sdpa" and not output_attentions:
+            # output_attentions=True cannot be supported when using SDPA; in that case we fall
+            # back on the manual implementation below, which requires a 4D causal mask in all cases.
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask,
+ (batch_size, seq_length),
+ inputs_embeds,
+ past_key_values_length,
+ sliding_window=self.config.sliding_window,
+ )
+ else:
+ # 4d mask is passed through the layers
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask,
+ (batch_size, seq_length),
+ inputs_embeds,
+ past_key_values_length,
+ sliding_window=self.config.sliding_window,
+ )
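+        # In the two non-flash branches the resulting mask is 4D with shape
+        # (batch_size, 1, query_length, key_value_length), holding 0 where attention
+        # is allowed and the dtype's minimum value at masked positions.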
+
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_router_logits = () if output_router_logits else None
+ next_decoder_cache = None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ output_router_logits,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ output_router_logits=output_router_logits,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if output_router_logits and layer_outputs[-1] is not None:
+ all_router_logits += (layer_outputs[-1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
+ if v is not None
+ )
+ return MoeModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ router_logits=all_router_logits,
+ )
+
+
+class Qwen2MoeForCausalLM(Qwen2MoePreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = Qwen2MoeModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ self.router_aux_loss_coef = config.router_aux_loss_coef
+ self.num_experts = config.num_experts
+ self.num_experts_per_tok = config.num_experts_per_tok
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, Qwen2MoeForCausalLM
+
+ >>> model = Qwen2MoeForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_router_logits=output_router_logits,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+ logits = logits.float()
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
+ aux_loss = None
+ if output_router_logits:
+ aux_loss = load_balancing_loss_func(
+ outputs.router_logits if return_dict else outputs[-1],
+ self.num_experts,
+ self.num_experts_per_tok,
+ attention_mask,
+ )
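+            # `load_balancing_loss_func` implements the Switch Transformers auxiliary
+            # objective, roughly num_experts * sum_e(f_e * p_e), where f_e is the
+            # fraction of tokens routed to expert e and p_e its mean router probability;
+            # it pushes the router towards a uniform load across experts.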
+ if labels is not None:
+                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure the aux_loss is on the same device as loss
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ if output_router_logits:
+ output = (aux_loss,) + output
+ return (loss,) + output if loss is not None else output
+
+ return MoeCausalLMOutputWithPast(
+ loss=loss,
+ aux_loss=aux_loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ router_logits=outputs.router_logits,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+ ):
+ # Omit tokens covered by past_key_values
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ max_cache_length = past_key_values.get_max_length()
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+            # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+            # 2 - If past_length is smaller than the length of input_ids, then input_ids holds all input tokens.
+            # We can discard the first past_length tokens of input_ids.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
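+        # Worked example of the cumsum trick above: attention_mask [0, 0, 1, 1, 1]
+        # -> cumsum - 1 = [-1, -1, 0, 1, 2] -> masked_fill = [1, 1, 0, 1, 2], so real
+        # tokens get positions 0..n-1 regardless of left padding.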
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
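+        # Reorder each layer's cached (key, value) tensors along the batch axis so
+        # that the cache follows the beam hypotheses selected at this search step.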
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ """
+ The Qwen2MoE Model transformer with a sequence classification head on top (linear layer).
+
+ [`Qwen2MoeForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ QWEN2MOE_START_DOCSTRING,
+)
+# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Qwen2Moe, LLAMA->QWEN2MOE
+class Qwen2MoeForSequenceClassification(Qwen2MoePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = Qwen2MoeModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
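+                # e.g. input_ids [5, 6, 7, pad, pad]: argmax over the pad mask is 3,
+                # minus 1 gives 2 (the last real token); with no padding, argmax
+                # returns 0, minus 1 gives -1, and -1 % seq_len wraps to the last index.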
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d4320cff5bd9bb80670b0ee7db752fdf6f38e60
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py
@@ -0,0 +1,405 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Converting Meta SeamlessM4Tv2 checkpoints from seamless_communication to HF."""
+
+
+import argparse
+import os
+from pathlib import Path
+
+import torch
+from accelerate.utils.modeling import find_tied_parameters
+from seamless_communication.inference import Translator
+
+from transformers import (
+ SeamlessM4TFeatureExtractor,
+ SeamlessM4TProcessor,
+ SeamlessM4TTokenizer,
+ SeamlessM4Tv2Config,
+ SeamlessM4Tv2Model,
+)
+from transformers.utils import logging
+
+
+# fmt: off
+UNIT_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kan__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tam__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__", ]
+# fmt: on
+
+# fmt: off
+VOCODER_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__",]
+# fmt: on
+
+# fmt: off
+LARGE_SUPPORTED_LANGUAGES = ["afr","amh","arb","ary","arz","asm","azj","bel","ben","bos","bul","cat","ceb","ces","ckb","cmn","cmn_Hant","cym","dan","deu","ell","eng","est","eus","fin","fra","fuv","gaz","gle","glg","guj","heb","hin","hrv","hun","hye","ibo","ind","isl","ita","jav","jpn","kan","kat","kaz","khk","khm","kir","kor","lao","lit","lug","luo","lvs","mai","mal","mar","mkd","mlt","mni","mya","nld","nno","nob","npi","nya","ory","pan","pbt","pes","pol","por","ron","rus","sat","slk","slv","sna","snd","som","spa","srp","swe","swh","tam","tel","tgk","tgl","tha","tur","ukr","urd","uzn","vie","yor","yue","zlm","zul",]
+# fmt: on
+
+
+def assert_param_count(model_1, model_2):
+ count_1 = sum(p[1].numel() for p in model_1.named_parameters() if "final_proj" not in p[0])
+ count_2 = sum(p[1].numel() for p in model_2.named_parameters() if "final_proj" not in p[0])
+ assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}"
+
+
+def param_count(model):
+ return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0])
+
+
+def _grab_best_device(use_gpu=True):
+ if torch.cuda.device_count() > 0 and use_gpu:
+ device = "cuda"
+ else:
+ device = "cpu"
+ return torch.device(device)
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+vocoder_convert_list = [
+ ("ups", "hifi_gan.upsampler"),
+ ("conv_pre", "hifi_gan.conv_pre"),
+ ("resblocks", "hifi_gan.resblocks"),
+ ("conv_post", "hifi_gan.conv_post"),
+ ("lang", "language_embedding"),
+ ("spkr", "speaker_embedding"),
+ ("dict.", "unit_embedding."),
+ ("dur_predictor.conv1.0", "dur_predictor.conv1"),
+ ("dur_predictor.conv2.0", "dur_predictor.conv2"),
+]
+
+# order is important
+wav2vec_convert_list = [
+ ("speech_encoder_frontend.model_dim_proj", "feature_projection.projection"),
+ ("speech_encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"),
+ ("speech_encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"),
+ ("speech_encoder.inner.layers", "encoder.layers"),
+ ("speech_encoder.inner_layer_norm", "encoder.layer_norm"),
+ ("speech_encoder.adaptor_layers", "adapter.layers"),
+ ("inner_proj", "intermediate_dense"),
+ ("self_attn.output_proj", "self_attn.linear_out"),
+ ("output_proj", "output_dense"),
+ ("self_attn.k_proj", "self_attn.linear_k"),
+ ("self_attn.v_proj", "self_attn.linear_v"),
+ ("self_attn.q_proj", "self_attn.linear_q"),
+ ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"),
+ ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"),
+ ("self_attn.sdpa.rel_k_embed", "self_attn.distance_embedding"),
+ ("self_attn.sdpa.r_proj", "self_attn.linear_pos"),
+ ("conv.pointwise_conv1", "conv_module.pointwise_conv1"),
+ ("conv.pointwise_conv2", "conv_module.pointwise_conv2"),
+ ("conv.depthwise_conv", "conv_module.depthwise_conv"),
+ ("conv.batch_norm", "conv_module.batch_norm"),
+ ("conv.layer_norm", "conv_module.depthwise_layer_norm"),
+ ("conv_layer_norm", "conv_module.layer_norm"),
+ ("speech_encoder.proj1", "intermediate_ffn.intermediate_dense"),
+ ("speech_encoder.proj2", "intermediate_ffn.output_dense"),
+ ("speech_encoder.layer_norm", "inner_layer_norm"),
+]
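+# Illustrative walk-through (key names assumed from the list above): replacements
+# are applied in order, so a fairseq2 key such as
+#   "model.speech_encoder.inner.layers.0.self_attn.k_proj.weight"
+# loses its "model." prefix in `_convert_model` and then becomes
+#   "encoder.layers.0.self_attn.linear_k.weight"
+# via ("speech_encoder.inner.layers", "encoder.layers") and
+# ("self_attn.k_proj", "self_attn.linear_k").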
+
+t2u_convert_list = [
+ ("t2u_model.final_proj", "lm_head"),
+ ("t2u_model.", "model."),
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
+ ("encoder_decoder_attn", "cross_attention"),
+ ("linear_k", "k_proj"),
+ ("linear_v", "v_proj"),
+ ("linear_q", "q_proj"),
+ ("ffn.inner_proj", "ffn.fc1"),
+ ("ffn.output_proj", "ffn.fc2"),
+ ("output_proj", "out_proj"),
+ ("decoder_frontend.embed_char", "decoder.embed_char"),
+ ("decoder_frontend.pos_emb_alpha_char", "decoder.pos_emb_alpha_char"),
+ ("decoder_frontend.embed", "decoder.embed_tokens"),
+ ("decoder_frontend.pos_emb_alpha", "decoder.pos_emb_alpha"),
+ ("conv1d.conv", "conv"),
+ ("conv1d_layer_norm", "conv_layer_norm"),
+ ("decoder_frontend.variance_adaptor", "decoder"),
+ ("duration_predictor.conv1.0", "duration_predictor.conv1"),
+ ("duration_predictor.conv2.0", "duration_predictor.conv2"),
+]
+
+text_convert_list = [
+ ("text_encoder.", ""),
+ ("text_decoder.", ""),
+ ("text_encoder_frontend.embed", "embed_tokens"),
+ ("text_decoder_frontend.embed", "embed_tokens"),
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
+ ("encoder_decoder_attn", "cross_attention"),
+ ("linear_k", "k_proj"),
+ ("linear_v", "v_proj"),
+ ("linear_q", "q_proj"),
+ ("ffn.inner_proj", "ffn.fc1"),
+ ("ffn.output_proj", "ffn.fc2"),
+ ("output_proj", "out_proj"),
+ ("final_proj", "lm_head"),
+]
+
+CUR_PATH = os.path.dirname(os.path.abspath(__file__))
+default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
+CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "huggingface", "hub")
+
+
+def _load_hf_config():
+ return SeamlessM4Tv2Config()
+
+
+def _convert_model(
+ original_model,
+ hf_model,
+ convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict="speech",
+ exclude_state_dict=None,
+):
+ state_dict = original_model.state_dict()
+
+ # filter func
+ if isinstance(filter_state_dict, str):
+
+ def filter_func(x):
+ return filter_state_dict in x[0]
+
+ else:
+
+ def filter_func(item):
+ if exclude_state_dict is not None and exclude_state_dict in item[0]:
+ return False
+ for filter_el in filter_state_dict:
+ if filter_el in item[0]:
+ return True
+
+ return False
+
+ state_dict = dict(filter(filter_func, state_dict.items()))
+
+ for k, v in list(state_dict.items()):
+ new_k = k[len(unwanted_prefix) :]
+ for old_layer_name, new_layer_name in convert_list:
+ if old_layer_name in new_k:
+ new_k = new_k.replace(old_layer_name, new_layer_name)
+
+ # must do it by hand
+ if ".layer_norm" in new_k and new_k.split(".layer_norm")[0][-1].isnumeric():
+ new_k = new_k.replace("layer_norm", "final_layer_norm")
+
+ state_dict[new_k] = state_dict.pop(k)
+
+    extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys())
+    missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys())
+    missing_keys = {k for k in missing_keys if "final_logits_bias" not in k}
+ if len(extra_keys) != 0:
+ raise ValueError(f"extra keys found: {extra_keys}")
+ if len(missing_keys) != 0:
+ raise ValueError(f"missing keys: {missing_keys}")
+ hf_model.load_state_dict(state_dict, strict=False)
+ n_params = param_count(hf_model)
+
+ logger.info(f"model loaded: {round(n_params/1e6,1)}M params")
+
+ hf_model.eval()
+ hf_model.to(device)
+ del state_dict
+
+ return hf_model
+
+
+def load_model(save_dir, model_type, repo_id):
+ """
+ Meta SeamlessM4Tv2 is made of 8 main components:
+ - speech_encoder (#1) and speech_encoder_frontend (#2)
+ - t2u_model (#3)
+ - text_encoder (#4) and text_encoder_frontend (#5)
+    - text_decoder (#6) [and text_decoder_frontend (#5), identical to text_encoder_frontend]
+ - final_proj (#7)
+ - vocoder (#8)
+ """
+ device = _grab_best_device()
+ name = "seamlessM4T_v2_large"
+
+ original_model = Translator(name, "vocoder_v2", device, dtype=torch.float32)
+
+ ######### TOKENIZER
+
+ langs = LARGE_SUPPORTED_LANGUAGES
+ langs = [f"__{lang}__" for lang in langs]
+ vocab_file = os.path.join(os.path.expanduser("~"), "tokenizer", model_type, "tokenizer.model")
+
+ save_dir = os.path.join(save_dir, name)
+ Path(save_dir).mkdir(exist_ok=True)
+
+ tokenizer = SeamlessM4TTokenizer(vocab_file, additional_special_tokens=langs)
+
+ sanity_check_lang_id = tokenizer.convert_tokens_to_ids("__fra__")
+
+ tokenizer.save_pretrained(save_dir)
+ tokenizer = SeamlessM4TTokenizer.from_pretrained(save_dir)
+
+ if sanity_check_lang_id != tokenizer.convert_tokens_to_ids("__fra__"):
+ raise ValueError(
+ f"Error in tokenizer saving/loading - __fra__ lang id is not coherent: {sanity_check_lang_id} vs {tokenizer.convert_tokens_to_ids('__fra__')}"
+ )
+
+ ####### get language to ids dict
+ text_decoder_lang_code_to_id = {lang.replace("__", ""): tokenizer.convert_tokens_to_ids(lang) for lang in langs}
+ # offset: vocoder unit vocab size + 5 (for EOS/PAD/BOS/UNK/MSK) + len(supported_languages)
+ t2u_lang_code_to_id = {
+ code.replace("__", ""): i + 10005 + len(UNIT_SUPPORTED_LANGUAGES)
+ for i, code in enumerate(UNIT_SUPPORTED_LANGUAGES)
+ }
+ vocoder_lang_code_to_id = {code.replace("__", ""): i for i, code in enumerate(VOCODER_SUPPORTED_LANGUAGES)}
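+    # e.g. "__arb__" (i=0) gets t2u id 0 + 10005 + 38 = 10043, since the unit
+    # vocabulary contributes 10005 (10000 units + 5 special tokens) and
+    # UNIT_SUPPORTED_LANGUAGES holds 38 entries.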
+
+ ######### FE
+
+ fe = SeamlessM4TFeatureExtractor(language_code=langs)
+
+ fe.save_pretrained(save_dir)
+ fe = SeamlessM4TFeatureExtractor.from_pretrained(save_dir)
+
+ processor = SeamlessM4TProcessor(feature_extractor=fe, tokenizer=tokenizer)
+ processor.save_pretrained(save_dir)
+ processor.push_to_hub(repo_id=repo_id, create_pr=True)
+
+ processor = SeamlessM4TProcessor.from_pretrained(save_dir)
+
+ ######## Model
+
+ # init config
+ hf_config = _load_hf_config()
+
+ ######## get id_to_text and char_to_id from original model tokenizers
+ id_to_text = {i: original_model.text_tokenizer.model.index_to_token(i) for i in range(hf_config.vocab_size)}
+ char_to_id = {
+ original_model.model.t2u_model.decoder_frontend.char_tokenizer.model.index_to_token(i): i for i in range(10904)
+ }
+
+ # init model
+ hf_model = SeamlessM4Tv2Model(hf_config)
+
+ hf_model.generation_config.__setattr__("text_decoder_lang_to_code_id", text_decoder_lang_code_to_id)
+ hf_model.generation_config.__setattr__("t2u_lang_code_to_id", t2u_lang_code_to_id)
+ hf_model.generation_config.__setattr__("vocoder_lang_code_to_id", vocoder_lang_code_to_id)
+ hf_model.generation_config.__setattr__("id_to_text", id_to_text)
+ hf_model.generation_config.__setattr__("char_to_id", char_to_id)
+
+ # -1. take care of vocoder
+    # similarly to SpeechT5, we must apply weight norm before loading the weights and remove it afterwards
+ hf_model.vocoder.apply_weight_norm()
+ hf_model.vocoder = _convert_model(
+ original_model,
+ hf_model.vocoder,
+ vocoder_convert_list,
+ device,
+ unwanted_prefix="vocoder.code_generator.",
+ filter_state_dict="vocoder",
+ )
+ hf_model.vocoder.remove_weight_norm()
+
+ # 1. take care of speech encoder
+ wav2vec = hf_model.speech_encoder
+ hf_model.speech_encoder = _convert_model(
+ original_model, wav2vec, wav2vec_convert_list, device, unwanted_prefix="model.", filter_state_dict="speech"
+ )
+
+ # 2. take care of t2u
+
+ hf_model.t2u_model = _convert_model(
+ original_model,
+ hf_model.t2u_model,
+ t2u_convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict="t2u_model",
+ )
+
+ # 3. take care of text encoder
+ hf_model.text_encoder = _convert_model(
+ original_model,
+ hf_model.text_encoder,
+ text_convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict=["model.text_encoder"],
+ exclude_state_dict="t2u_model",
+ )
+
+ # 4. take care of text decoder
+ hf_model.text_decoder = _convert_model(
+ original_model,
+ hf_model.text_decoder,
+ text_convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict=["model.text_decoder"],
+ exclude_state_dict="t2u_model",
+ )
+
+ # 5. take care of final proj
+ hf_model.lm_head = _convert_model(
+ original_model,
+ hf_model.lm_head,
+ [("final_proj.", "")],
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict=["model.final_proj"],
+ exclude_state_dict="t2u_model",
+ )
+
+ # sanity check
+ print(find_tied_parameters(hf_model))
+
+ count_1 = param_count(hf_model)
+ count_2 = param_count(original_model)
+
+ print(f"HF MODEL:{count_1}, ORIGINAL_MODEL: {count_2}, diff:{count_1 - count_2}")
+ print(f"HF MODEL excluding embeddings:{hf_model.num_parameters(exclude_embeddings=True)}")
+
+ del original_model
+
+ hf_model.generation_config._from_model_config = False
+ hf_model.save_pretrained(save_dir)
+ hf_model.push_to_hub(repo_id=repo_id, create_pr=True)
+ hf_model = SeamlessM4Tv2Model.from_pretrained(save_dir)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+
+ parser.add_argument(
+ "--model_type",
+ default="large",
+ type=str,
+ help="Model type.",
+ )
+
+ parser.add_argument(
+ "--save_dir",
+ default="/home/ubuntu/weights_v2",
+ type=str,
+ help="Path to the output PyTorch model.",
+ )
+
+ parser.add_argument(
+ "--repo_id",
+ default="facebook/seamless-m4t-v2-large",
+ type=str,
+ help="Repo ID.",
+ )
+
+ args = parser.parse_args()
+
+ load_model(args.save_dir, args.model_type, args.repo_id)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3194f99931a4d689f6bab0cf3cb9dc6abaf11fb8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py
@@ -0,0 +1,108 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tf_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
+ "feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"],
+ "processing_speech_to_text": ["Speech2TextProcessor"],
+}
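+# Each block below probes one optional backend (sentencepiece, TensorFlow,
+# PyTorch) and only registers the corresponding modules in `_import_structure`
+# when the dependency is importable, so minimal installs degrade gracefully.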
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_speech_to_text"] = [
+ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFSpeech2TextForConditionalGeneration",
+ "TFSpeech2TextModel",
+ "TFSpeech2TextPreTrainedModel",
+ ]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_speech_to_text"] = [
+ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "Speech2TextForConditionalGeneration",
+ "Speech2TextModel",
+ "Speech2TextPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
+ from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
+ from .processing_speech_to_text import Speech2TextProcessor
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_speech_to_text import Speech2TextTokenizer
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_speech_to_text import (
+ TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFSpeech2TextForConditionalGeneration,
+ TFSpeech2TextModel,
+ TFSpeech2TextPreTrainedModel,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_speech_to_text import (
+ SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ Speech2TextForConditionalGeneration,
+ Speech2TextModel,
+ Speech2TextPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc5263e3cf669c80223b3d37cb7c78eeae787716
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab6169c95b34e9f773bbb4287813f8c00b283b0a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..58cfac135cc9be79cffc4bd72ebe26566defdbf2
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5714cea3dadd89e17cd1e6df57360394043005b4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e3c4db29e8682e75bd671455a427a24a3026e6e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_tf_speech_to_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_tf_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..adfa91a51a294681ce13ea573e7354cf1570ecc9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_tf_speech_to_text.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/processing_speech_to_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/processing_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..245c77b69fa1787bc53ed6f32b39c1b5c08028d4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/processing_speech_to_text.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/tokenization_speech_to_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/tokenization_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb078fddb68db8f93f0c87d102a9fd6747a3e7ea
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/tokenization_speech_to_text.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/configuration_speech_to_text.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/configuration_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..67dee8dc0bc361e5046052263651d36273d41d7f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/configuration_speech_to_text.py
@@ -0,0 +1,199 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Speech2Text model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class Speech2TextConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Speech2TextModel`]. It is used to instantiate a
+ Speech2Text model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Speech2Text
+ [facebook/s2t-small-librispeech-asr](https://huggingface.co/facebook/s2t-small-librispeech-asr) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 10000):
+            Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented
+            by the `input_ids` passed when calling [`Speech2TextModel`]
+ encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
+ encoder_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_layers (`int`, *optional*, defaults to 6):
+ Number of decoder layers.
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
+ more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
+ more details.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether the model should return the last key/values attentions (not used by all models).
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether the model is set up as an encoder-decoder architecture for sequence-to-sequence tasks.
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ d_model (`int`, *optional*, defaults to 256):
+ Dimensionality of the layers and the pooler layer.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ decoder_start_token_id (`int`, *optional*, defaults to 2):
+ The initial token ID of the decoder when decoding sequences.
+ scale_embedding (`bool`, *optional*, defaults to `True`):
+ Whether the embeddings are scaled by the square root of `d_model`.
+ pad_token_id (`int`, *optional*, defaults to 1):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 0):
+ The id of the beginning-of-sequence token.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the end-of-sequence token.
+ max_source_positions (`int`, *optional*, defaults to 6000):
+ The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
+ max_target_positions (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically, set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ num_conv_layers (`int`, *optional*, defaults to 2):
+ Number of 1D convolutional layers in the conv module.
+ conv_kernel_sizes (`Tuple[int]`, *optional*, defaults to `(5, 5)`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the conv module. The length
+ of `conv_kernel_sizes` has to match `num_conv_layers`.
+        conv_channels (`int`, *optional*, defaults to 1024):
+            An integer defining the number of output channels of each convolutional layer except the final one in the
+            conv module.
+        input_feat_per_channel (`int`, *optional*, defaults to 80):
+            An integer specifying the size of the feature vector. This is also the dimension of the log-mel
+            filter-bank features.
+        input_channels (`int`, *optional*, defaults to 1):
+            An integer specifying the number of input channels of the input feature vector.
+
+ Example:
+
+ ```python
+ >>> from transformers import Speech2TextConfig, Speech2TextModel
+
+ >>> # Initializing a Speech2Text s2t_transformer_s style configuration
+ >>> configuration = Speech2TextConfig()
+
+ >>> # Initializing a model (with random weights) from the s2t_transformer_s style configuration
+ >>> model = Speech2TextModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "speech_to_text"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ def __init__(
+ self,
+ vocab_size=10000,
+ encoder_layers=12,
+ encoder_ffn_dim=2048,
+ encoder_attention_heads=4,
+ decoder_layers=6,
+ decoder_ffn_dim=2048,
+ decoder_attention_heads=4,
+ encoder_layerdrop=0.0,
+ decoder_layerdrop=0.0,
+ use_cache=True,
+ is_encoder_decoder=True,
+ activation_function="relu",
+ d_model=256,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ init_std=0.02,
+ decoder_start_token_id=2,
+ scale_embedding=True,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ max_source_positions=6000,
+ max_target_positions=1024,
+ num_conv_layers=2,
+ conv_kernel_sizes=(5, 5),
+ conv_channels=1024,
+ input_feat_per_channel=80,
+ input_channels=1,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ self.max_source_positions = max_source_positions
+ self.max_target_positions = max_target_positions
+ self.num_conv_layers = num_conv_layers
+ self.conv_kernel_sizes = list(conv_kernel_sizes)
+ self.conv_channels = conv_channels
+ self.input_feat_per_channel = input_feat_per_channel
+ self.input_channels = input_channels
+
+ if len(self.conv_kernel_sizes) != self.num_conv_layers:
+ raise ValueError(
+ "Configuration for convolutional module is incorrect. "
+ "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
+ f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
+ f"`config.num_conv_layers = {self.num_conv_layers}`."
+ )
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb4d852624790998657161f6b15cd9572aca7f78
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py
@@ -0,0 +1,121 @@
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+import torch
+from torch import nn
+
+from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
+
+
+def remove_ignore_keys_(state_dict):
+ ignore_keys = [
+ "encoder.version",
+ "decoder.version",
+ "model.encoder.version",
+ "model.decoder.version",
+ "decoder.output_projection.weight",
+ "_float_tensor",
+ "encoder.embed_positions._float_tensor",
+ "decoder.embed_positions._float_tensor",
+ ]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def rename_keys(s_dict):
+ keys = list(s_dict.keys())
+ for key in keys:
+ if "transformer_layers" in key:
+ s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
+ elif "subsample" in key:
+ s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
+
+
+def make_linear_from_emb(emb):
+    vocab_size, emb_size = emb.weight.shape
+    # lm_head projects hidden states (emb_size) onto the vocabulary (vocab_size);
+    # tying its weight to the embedding matrix keeps the shapes consistent.
+    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
+    lin_layer.weight.data = emb.weight.data
+    return lin_layer
+
+
+def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
+ m2m_100 = torch.load(checkpoint_path, map_location="cpu")
+ args = m2m_100["args"]
+ state_dict = m2m_100["model"]
+ lm_head_weights = state_dict["decoder.output_projection.weight"]
+
+ remove_ignore_keys_(state_dict)
+ rename_keys(state_dict)
+
+ vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
+
+ tie_embeds = args.share_decoder_input_output_embed
+
+ conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
+ config = Speech2TextConfig(
+ vocab_size=vocab_size,
+ max_source_positions=args.max_source_positions,
+ max_target_positions=args.max_target_positions,
+ encoder_layers=args.encoder_layers,
+ decoder_layers=args.decoder_layers,
+ encoder_attention_heads=args.encoder_attention_heads,
+ decoder_attention_heads=args.decoder_attention_heads,
+ encoder_ffn_dim=args.encoder_ffn_embed_dim,
+ decoder_ffn_dim=args.decoder_ffn_embed_dim,
+ d_model=args.encoder_embed_dim,
+ dropout=args.dropout,
+ attention_dropout=args.attention_dropout,
+ activation_dropout=args.activation_dropout,
+ activation_function="relu",
+ num_conv_layers=len(conv_kernel_sizes),
+ conv_channels=args.conv_channels,
+ conv_kernel_sizes=conv_kernel_sizes,
+ input_feat_per_channel=args.input_feat_per_channel,
+ input_channels=args.input_channels,
+ tie_word_embeddings=tie_embeds,
+ num_beams=5,
+ max_length=200,
+ use_cache=True,
+ decoder_start_token_id=2,
+ early_stopping=True,
+ )
+
+ model = Speech2TextForConditionalGeneration(config)
+ missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
+ if len(missing) > 0 and not set(missing) <= {
+ "encoder.embed_positions.weights",
+ "decoder.embed_positions.weights",
+ }:
+ raise ValueError(
+ "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
+ f" but all the following weights are missing {missing}"
+ )
+
+ if tie_embeds:
+ model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
+ else:
+ model.lm_head.weight.data = lm_head_weights
+
+ model.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ args = parser.parse_args()
+ convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/feature_extraction_speech_to_text.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..193f2dda0946f1ca9c121652c95e475f38b3bf0b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
@@ -0,0 +1,297 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Feature extractor class for Speech2Text
+"""
+
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ...audio_utils import mel_filter_bank, spectrogram, window_function
+from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ...feature_extraction_utils import BatchFeature
+from ...utils import PaddingStrategy, TensorType, is_speech_available, logging
+
+
+if is_speech_available():
+ import torch
+ import torchaudio.compliance.kaldi as ta_kaldi
+
+logger = logging.get_logger(__name__)
+
+
+class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
+ r"""
+ Constructs a Speech2Text feature extractor.
+
+    This feature extractor inherits from [`SequenceFeatureExtractor`], which contains most of the main methods. Users
+    should refer to this superclass for more information regarding those methods.
+
+ This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy
+ otherwise, and applies utterance-level cepstral mean and variance normalization to the extracted features.
+
+ Args:
+ feature_size (`int`, *optional*, defaults to 80):
+ The feature dimension of the extracted features.
+ sampling_rate (`int`, *optional*, defaults to 16000):
+            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
+ num_mel_bins (`int`, *optional*, defaults to 80):
+ Number of Mel-frequency bins.
+ padding_value (`float`, *optional*, defaults to 0.0):
+ The value that is used to fill the padding vectors.
+ do_ceptral_normalize (`bool`, *optional*, defaults to `True`):
+ Whether or not to apply utterance-level cepstral mean and variance normalization to extracted features.
+ normalize_means (`bool`, *optional*, defaults to `True`):
+ Whether or not to zero-mean normalize the extracted features.
+ normalize_vars (`bool`, *optional*, defaults to `True`):
+ Whether or not to unit-variance normalize the extracted features.
+ """
+
+ model_input_names = ["input_features", "attention_mask"]
+
+ def __init__(
+ self,
+ feature_size=80,
+ sampling_rate=16000,
+ num_mel_bins=80,
+ padding_value=0.0,
+ do_ceptral_normalize=True,
+ normalize_means=True,
+ normalize_vars=True,
+ **kwargs,
+ ):
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
+ self.num_mel_bins = num_mel_bins
+ self.do_ceptral_normalize = do_ceptral_normalize
+ self.normalize_means = normalize_means
+ self.normalize_vars = normalize_vars
+ self.return_attention_mask = True
+
+ if not is_speech_available():
+ mel_filters = mel_filter_bank(
+ num_frequency_bins=256,
+ num_mel_filters=self.num_mel_bins,
+ min_frequency=20,
+ max_frequency=sampling_rate // 2,
+ sampling_rate=sampling_rate,
+ norm=None,
+ mel_scale="kaldi",
+ triangularize_in_mel_space=True,
+ )
+
+ self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0)))
+ self.window = window_function(400, "povey", periodic=False)
+
+ def _extract_fbank_features(
+ self,
+ waveform: np.ndarray,
+ ) -> np.ndarray:
+ """
+        Get mel-filter bank features using TorchAudio if it is available, otherwise compute them with the numpy
+        backend. Note that the Kaldi frontend expects 16-bit signed integer scaling, hence the waveform should not
+        be normalized before feature extraction.
+ """
+ waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
+ if is_speech_available():
+ waveform = torch.from_numpy(waveform).unsqueeze(0)
+ features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
+ features = features.numpy()
+ else:
+ waveform = np.squeeze(waveform)
+ features = spectrogram(
+ waveform,
+ self.window,
+ frame_length=400,
+ hop_length=160,
+ fft_length=512,
+ power=2.0,
+ center=False,
+ preemphasis=0.97,
+ mel_filters=self.mel_filters,
+ log_mel="log",
+ mel_floor=1.192092955078125e-07,
+ remove_dc_offset=True,
+ ).T
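+        # Framing note (assuming 16 kHz audio): frame_length=400 samples = 25 ms
+        # windows with hop_length=160 samples = 10 ms stride, so one second of audio
+        # yields (16000 - 400) // 160 + 1 = 98 frames of num_mel_bins features.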
+ return features
+
+ @staticmethod
+ def utterance_cmvn(
+ x: np.ndarray,
+ input_length: int,
+ normalize_means: Optional[bool] = True,
+ normalize_vars: Optional[bool] = True,
+ padding_value: float = 0.0,
+ ) -> np.ndarray:
+ # make sure we normalize float32 arrays
+ if normalize_means:
+ mean = x[:input_length].mean(axis=0)
+ x = np.subtract(x, mean)
+ if normalize_vars:
+ std = x[:input_length].std(axis=0)
+ x = np.divide(x, std)
+
+ if input_length < x.shape[0]:
+ x[input_length:] = padding_value
+
+ # make sure array is in float32
+ x = x.astype(np.float32)
+
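+        # Toy illustration (hypothetical values): for x = [[1.], [3.], [0.]] with
+        # input_length=2 and padding_value=0.0, the mean/std over the first two
+        # frames are 2.0/1.0, so the output is [[-1.], [1.], [0.]], with the
+        # trailing padded frame reset to the padding value.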
+ return x
+
+ def normalize(
+ self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
+ ) -> List[np.ndarray]:
+ lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
+ return [
+ self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
+ for x, n in zip(input_features, lengths)
+ ]
+
+ def __call__(
+ self,
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+ padding: Union[bool, str, PaddingStrategy] = False,
+ max_length: Optional[int] = None,
+ truncation: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ sampling_rate: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Main method to featurize and prepare for the model one or several sequence(s).
+
+ Args:
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
+                The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
+                values, a list of numpy arrays or a list of lists of float values. Must be mono channel audio, not
+                stereo, i.e. a single float per timestep.
+            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+                Select a strategy to pad the returned sequences (according to the model's padding side and padding
+                index) among:
+
+                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+                  sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ truncation (`bool`):
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific feature_extractor's default.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+                <Tip>
+
+                For Speech2TextTransformer models, `attention_mask` should always be passed for batched inference, to
+                avoid subtle bugs.
+
+                </Tip>
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ sampling_rate (`int`, *optional*):
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+ `sampling_rate` at the forward call to prevent silent errors.
+ padding_value (`float`, defaults to 0.0):
+ The value that is used to fill the padding values / vectors.
+ """
+
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
+ f" {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+ if is_batched_numpy and len(raw_speech.shape) > 2:
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+ is_batched = is_batched_numpy or (
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+ )
+
+ if is_batched:
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+ raw_speech = raw_speech.astype(np.float32)
+
+ # always return batch
+ if not is_batched:
+ raw_speech = [raw_speech]
+
+ # extract fbank features
+ features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
+
+ # convert into correct format for padding
+ encoded_inputs = BatchFeature({"input_features": features})
+
+ padded_inputs = self.pad(
+ encoded_inputs,
+ padding=padding,
+ max_length=max_length,
+ truncation=truncation,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ **kwargs,
+ )
+
+ # make sure list is in array format
+ input_features = padded_inputs.get("input_features")
+ if isinstance(input_features[0], list):
+ padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
+
+ attention_mask = padded_inputs.get("attention_mask")
+ if attention_mask is not None:
+ padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
+
+ # Utterance-level cepstral mean and variance normalization
+ if self.do_ceptral_normalize:
+ attention_mask = (
+ np.array(attention_mask, dtype=np.int32)
+ if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
+ else None
+ )
+ padded_inputs["input_features"] = self.normalize(
+ padded_inputs["input_features"], attention_mask=attention_mask
+ )
+
+ if return_tensors is not None:
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
+
+ return padded_inputs
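+
+
+# A minimal usage sketch (the array below is illustrative random data, not real
+# speech):
+#
+#     import numpy as np
+#
+#     extractor = Speech2TextFeatureExtractor()
+#     speech = np.random.randn(16000).astype(np.float32)  # 1 s of 16 kHz audio
+#     inputs = extractor(speech, sampling_rate=16000, return_tensors="np")
+#     inputs.input_features.shape  # (1, num_frames, 80) after fbank + CMVN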
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_speech_to_text.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..6898cc081fe91f122d1a5a7e059251b7a5a25909
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_speech_to_text.py
@@ -0,0 +1,1370 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Speech2Text model."""
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_speech_to_text import Speech2TextConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "Speech2TextConfig"
+
+
+from ..deprecated._archive_maps import SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
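+# Worked example (hypothetical values): with pad_token_id=1, decoder_start_token_id=2
+# and labels [[5, -100, 6]], shift_tokens_right returns [[2, 5, 1]]: the start token
+# is prepended, each label shifts one slot right (the final 6 drops off), and the
+# -100 ignore index becomes the pad token.
+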
+
+class Conv1dSubsampler(nn.Module):
+ """
+    Convolutional subsampler: a stack of 1D convolutions (along the temporal dimension), each followed by a
+    non-linear activation via gated linear units (https://arxiv.org/abs/1911.08460)
+ """
+
+ def __init__(self, config):
+ super(Conv1dSubsampler, self).__init__()
+ self.config = config
+ self.num_layers = config.num_conv_layers
+ self.in_channels = config.input_feat_per_channel * config.input_channels
+ self.mid_channels = config.conv_channels
+ self.out_channels = config.d_model
+ self.kernel_sizes = config.conv_kernel_sizes
+
+ self.conv_layers = nn.ModuleList(
+ nn.Conv1d(
+ self.in_channels if i == 0 else self.mid_channels // 2,
+ self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
+ kernel_size=k,
+ stride=2,
+ padding=k // 2,
+ )
+ for i, k in enumerate(self.kernel_sizes)
+ )
+
+ def forward(self, input_features):
+ hidden_states = input_features.transpose(1, 2).contiguous() # -> B x (C x D) x T
+ for conv in self.conv_layers:
+ hidden_states = conv(hidden_states)
+ hidden_states = nn.functional.glu(hidden_states, dim=1)
+        hidden_states = hidden_states.transpose(1, 2).contiguous()  # -> B x T x (C x D)
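+        # Each conv layer has stride 2, so the time axis is subsampled by a factor of
+        # roughly 2 ** num_conv_layers overall: e.g. (B, 1000, 80) input features
+        # become (B, 250, d_model) hidden states with the default two conv layers.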
+ return hidden_states
+
+
+class Speech2TextSinusoidalPositionalEmbedding(nn.Module):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ super().__init__()
+ self.offset = 2
+ self.embedding_dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
+
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
+ if hasattr(self, "weights"):
+ # in forward put the weights on the correct dtype and device of the param
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
+
+ self.weights = nn.Parameter(emb_weights)
+ self.weights.requires_grad = False
+ self.weights.detach_()
+
+ @staticmethod
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ """
+ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
+ description in Section 3.5 of "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ if padding_idx is not None:
+ emb[padding_idx, :] = 0
+ return emb.to(torch.get_default_dtype())
+
+ @torch.no_grad()
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
+ bsz, seq_len = input_ids.size()
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
+ input_ids.device
+ )
+
+ # expand embeddings if needed
+ max_pos = self.padding_idx + 1 + seq_len
+ if max_pos > self.weights.size(0):
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
+
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
+
+ def create_position_ids_from_input_ids(
+ self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
+ ):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+        Args:
+            input_ids (`torch.Tensor`): input token ids
+            padding_idx (`int`): index of the padding token
+            past_key_values_length (`int`, *optional*, defaults to 0): number of already-cached decoder positions
+
+        Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
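+        # Worked example (padding_idx = 1): input_ids [[7, 9, 1]] -> mask [1, 1, 0],
+        # cumsum [1, 2, 2], masked [1, 2, 0]; adding padding_idx gives positions
+        # [2, 3, 1], so real tokens count up from padding_idx + 1 and pads keep
+        # padding_idx itself.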
+ return incremental_indices.long() + padding_idx
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Speech2Text
+class Speech2TextAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[Speech2TextConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+SPEECH_TO_TEXT_ATTENTION_CLASSES = {"eager": Speech2TextAttention}
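+# Only the "eager" (pure PyTorch) attention implementation is registered for this
+# model; `config._attn_implementation` selects the entry instantiated by the
+# encoder/decoder layers below.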
+
+
+# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT
+class Speech2TextEncoderLayer(nn.Module):
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ config=config,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ layer_head_mask: torch.Tensor,
+ output_attentions: bool = False,
+    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT
+class Speech2TextDecoderLayer(nn.Module):
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ is_causal=True,
+ config=config,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation](
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ config=config,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+    ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+                `(decoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class Speech2TextPreTrainedModel(PreTrainedModel):
+ config_class = Speech2TextConfig
+ base_model_prefix = "model"
+ main_input_name = "input_features"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
+ """
+ Computes the output length of the convolutional layers
+ """
+ for i in range(self.config.num_conv_layers):
+ input_lengths = (input_lengths - 1) // 2 + 1
+
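+        # e.g. with the default two conv layers, 584 input frames map to
+        # (584 - 1) // 2 + 1 = 292 and then (292 - 1) // 2 + 1 = 146 output frames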
+ return input_lengths
+
+ def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
+        # generate() creates a 3D attention mask because of the shape of input_features;
+        # convert it to 2D if that's the case
+ if len(attention_mask.shape) > 2:
+ attention_mask = attention_mask[:, :, -1]
+
+ subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
+ bsz = attention_mask.size()[0]
+ attention_mask = torch.zeros(
+ (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
+ )
+
+        # these two operations make sure that all values
+        # before the output length indices are attended to
+ attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
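+        # e.g. for feature_vector_length = 4 and a subsampled length of 3, the scatter
+        # above gives [0, 0, 1, 0] and the flip/cumsum/flip turns it into [1, 1, 1, 0]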
+ return attention_mask
+
+
+SPEECH_TO_TEXT_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`Speech2TextConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
+ by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
+ via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
+ [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a
+ tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`]
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
+ 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+            Speech2Text uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should read
+ [`modeling_speech_to_text._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states
+            at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class Speech2TextEncoder(Speech2TextPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`Speech2TextEncoderLayer`].
+
+ Args:
+ config: Speech2TextConfig
+ """
+
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_source_positions
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ self.conv = Conv1dSubsampler(config)
+
+ self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
+ self.max_source_positions,
+ embed_dim,
+ self.padding_idx,
+ )
+ self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_features,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+            input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
+ obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
+ `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
+ `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
+ padding and conversion into a tensor of type `torch.FloatTensor`. See
+ [`~Speech2TextFeatureExtractor.__call__`]
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ inputs_embeds = self.conv(input_features)
+ inputs_embeds = self.embed_scale * inputs_embeds
+
+ # subsample attention mask if necessary
+ if attention_mask is not None:
+ attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
+ padding_mask = attention_mask.ne(1).long()
+ else:
+ padding_mask = torch.zeros(inputs_embeds.shape[:2], dtype=torch.long, device=inputs_embeds.device)
+
+ embed_pos = self.embed_positions(padding_mask)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ assert head_mask.size()[0] == (
+ len(self.layers)
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class Speech2TextDecoder(Speech2TextPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`Speech2TextDecoderLayer`]
+
+ Args:
+ config: Speech2TextConfig
+ """
+
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_target_positions
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
+
+ self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
+ self.max_target_positions,
+ config.d_model,
+ self.padding_idx,
+ )
+
+ self.layers = nn.ModuleList([Speech2TextDecoderLayer(config) for _ in range(config.decoder_layers)])
+
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+                Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected
+                in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
+
+ hidden_states = inputs_embeds + positions
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ assert attn_mask.size()[0] == (len(self.layers)), (
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ hidden_states = self.layer_norm(hidden_states)
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
+ SPEECH_TO_TEXT_START_DOCSTRING,
+)
+class Speech2TextModel(Speech2TextPreTrainedModel):
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+
+ self.encoder = Speech2TextEncoder(config)
+ self.decoder = Speech2TextDecoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.decoder.embed_tokens = value
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
+    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+        input_features: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from transformers import Speech2TextModel, AutoFeatureExtractor
+ >>> from datasets import load_dataset
+
+ >>> model = Speech2TextModel.from_pretrained("facebook/s2t-small-librispeech-asr")
+ >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> inputs = feature_extractor(
+ ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
+ ... )
+ >>> input_features = inputs.input_features
+ >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
+ >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
+ >>> list(last_hidden_state.shape)
+ [1, 2, 256]
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_features,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # downsample encoder attention mask
+ if attention_mask is not None:
+ encoder_attention_mask = self._get_feature_vector_attention_mask(
+ encoder_outputs[0].shape[1], attention_mask
+ )
+ else:
+ encoder_attention_mask = None
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The Speech2Text Model with a language modeling head. Can be used for summarization.",
+ SPEECH_TO_TEXT_START_DOCSTRING,
+)
+class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel):
+ base_model_prefix = "model"
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+ self.model = Speech2TextModel(config)
+ self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_features: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
+            or -100 (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the loss
+            is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
+ >>> from datasets import load_dataset
+
+ >>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
+ >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
+
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+
+ >>> inputs = processor(
+ ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
+ ... )
+ >>> input_features = inputs.input_features
+
+ >>> generated_ids = model.generate(inputs=input_features)
+
+ >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ >>> transcription
+ 'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel'
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_features,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0])
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
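+
+    # Illustrative sketch (hypothetical values, not executed): during beam search with
+    # beam_idx = torch.tensor([2, 0, 1]), each layer's cached key/value tensors of shape
+    # (batch_size * num_beams, num_heads, seq_len, head_dim) are re-gathered along the
+    # first axis, so every beam continues decoding from the cache of the hypothesis it
+    # was reordered onto.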
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fd6bd21a593c90d671a595b5faa056a97e71f19
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@@ -0,0 +1,1607 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TensorFlow Speech2Text model."""
+
+
+from __future__ import annotations
+
+import random
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation, glu
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPastAndCrossAttentions,
+ TFSeq2SeqLMOutput,
+ TFSeq2SeqModelOutput,
+)
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSharedEmbeddings,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_speech_to_text import Speech2TextConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "Speech2TextConfig"
+_CHECKPOINT_FOR_DOC = "facebook/s2t-small-librispeech-asr"
+
+
+from ..deprecated._archive_maps import TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+LARGE_NEGATIVE = -1e8
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
+def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
+ start_tokens = tf.fill(
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
+ )
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids = tf.where(
+ shifted_input_ids == -100,
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
+ shifted_input_ids,
+ )
+
+ # "Verify that `labels` has only positive values and -100"
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
+
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
+ with tf.control_dependencies([assert_gte0]):
+ shifted_input_ids = tf.identity(shifted_input_ids)
+
+ return shifted_input_ids
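+
+# A minimal sketch (hypothetical values, not executed) of what `shift_tokens_right`
+# produces, assuming pad_token_id=1 and decoder_start_token_id=2:
+#
+#   labels            = [[5, 6, -100]]
+#   shifted_input_ids = [[2, 5, 6]]
+#
+# The start token is prepended, the last position is dropped, and any -100 is
+# replaced by the pad token before the non-negativity assertion runs.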
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
+def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
+ """
+    Make causal mask used for uni-directional (causal) self-attention.
+ """
+ bsz = input_ids_shape[0]
+ tgt_len = input_ids_shape[1]
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
+ mask_cond = tf.range(shape_list(mask)[-1])
+
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
+
+ if past_key_values_length > 0:
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
+
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
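+
+# Illustrative sketch (not executed): for tgt_len=3 and no past, each target position
+# may only attend to itself and earlier positions, i.e. (before broadcasting)
+#
+#   [[0, -1e8, -1e8],
+#    [0,    0, -1e8],
+#    [0,    0,    0]]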
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
+
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
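+
+# Illustrative sketch (not executed): a padding mask [[1, 1, 0]] becomes the additive
+# attention bias [[0.0, 0.0, -1e8]], tiled to shape [bsz, 1, tgt_len, src_len], so
+# masked source positions receive a large negative score before the softmax.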
+
+
+class TFConv1dSubsampler(keras.layers.Layer):
+ """
+ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
+ via gated linear units (https://arxiv.org/abs/1911.08460)
+ """
+
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.num_layers = config.num_conv_layers
+ self.in_channels = config.input_feat_per_channel * config.input_channels
+ self.mid_channels = config.conv_channels
+ self.out_channels = config.d_model
+ self.kernel_sizes = config.conv_kernel_sizes
+
+ self.conv_layers = [
+ keras.layers.Conv1D(
+ filters=self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
+ kernel_size=k,
+ strides=2,
+ name=f"conv_layers.{i}",
+ )
+ for i, k in enumerate(self.kernel_sizes)
+ ]
+
+ def call(self, input_features: tf.Tensor) -> tf.Tensor:
+ # TF Conv1D assumes Batch x Time x Channels, same as the input
+ hidden_states = tf.cast(input_features, tf.float32)
+ for i, conv in enumerate(self.conv_layers):
+ # equivalent to `padding=k // 2` on PT's `nn.Conv1d`
+ pad_len = self.kernel_sizes[i] // 2
+ hidden_shapes = shape_list(hidden_states)
+ hidden_states = tf.concat(
+ (
+ tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
+ hidden_states,
+ tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
+ ),
+ axis=1,
+ )
+
+ hidden_states = conv(hidden_states)
+ hidden_states = glu(hidden_states, axis=2) # GLU over the Channel dimension
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv_layers", None) is not None:
+ for i, layer in enumerate(self.conv_layers):
+ with tf.name_scope(layer.name):
+ layer.build([None, None, self.in_channels] if i == 0 else [None, None, self.mid_channels // 2])
+
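+# Shape sketch (illustrative): each stride-2 convolution maps the time axis as
+# T -> (T - 1) // 2 + 1 (for the default odd kernel sizes), and each GLU halves the
+# channel axis, so an input of shape (batch, T, in_channels) ends up as roughly
+# (batch, T / 2**num_layers, d_model).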
+
+class TFSpeech2TextSinusoidalPositionalEmbedding(keras.layers.Layer):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.offset = 2
+ self.embedding_dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.embedding_weights = self._get_embedding(num_positions + self.offset, embedding_dim, padding_idx)
+
+ @staticmethod
+ def _get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None) -> tf.Tensor:
+ """
+ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
+ description in Section 3.5 of "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = tf.math.log(10000.0) / (half_dim - 1)
+ emb = tf.math.exp(tf.range(half_dim, dtype=tf.float32) * -emb)
+ emb = tf.expand_dims(tf.range(num_embeddings, dtype=tf.float32), axis=1) * tf.expand_dims(emb, axis=0)
+ emb = tf.reshape(tf.concat([tf.math.sin(emb), tf.math.cos(emb)], axis=1), shape=[num_embeddings, -1])
+ if embedding_dim % 2 == 1:
+ # zero pad
+            emb = tf.concat([emb, tf.zeros((num_embeddings, 1))], axis=1)
+ if padding_idx is not None:
+ emb = tf.concat([emb[:padding_idx, :], tf.zeros((1, tf.shape(emb)[1])), emb[padding_idx + 1 :, :]], axis=0)
+ return emb
+
+ def call(self, input_ids: tf.Tensor, past_key_values_length: int = 0) -> tf.Tensor:
+ bsz, seq_len = shape_list(input_ids)
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
+
+ # Matt: The PyTorch code does a lot of work to cache the embeddings, setting the cached values as a
+        # model attribute in the forward pass. That pattern is not allowed in TF, which wants forward calls to be
+ # idempotent. TF doesn't need that caching anyway, since it can just store constants during compilation,
+ # so we just remove all of that code.
+ embeddings = self._get_embedding(
+ self.padding_idx + 1 + seq_len + self.offset + past_key_values_length, self.embedding_dim, self.padding_idx
+ )
+ return tf.reshape(tf.gather(embeddings, tf.reshape(position_ids, (-1,)), axis=0), (bsz, seq_len, -1))
+
+ @staticmethod
+ def create_position_ids_from_input_ids(
+ input_ids: tf.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
+ ) -> tf.Tensor:
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+        Args:
+            input_ids (`tf.Tensor`): input token ids
+            padding_idx (`int`): index of the padding token
+            past_key_values_length (`int`, *optional*, defaults to 0): length of the cached key/value states
+
+        Returns: `tf.Tensor`
+ """
+ mask = tf.cast(tf.math.not_equal(input_ids, padding_idx), dtype=tf.int32)
+ incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
+ return tf.cast(incremental_indices, dtype=tf.int64) + padding_idx
+
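+# Worked example (illustrative, not executed) for `create_position_ids_from_input_ids`
+# with padding_idx=1 and no past: input_ids [[1, 7, 9, 1]] gives mask [[0, 1, 1, 0]],
+# cumulative positions [[0, 1, 2, 0]], and final position ids [[1, 2, 3, 1]], so
+# padding positions keep the padding index.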
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Speech2Text
+class TFSpeech2TextAttention(keras.layers.Layer):
+ """Multi-headed attention from "Attention Is All You Need"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.embed_dim = embed_dim
+
+ self.num_heads = num_heads
+ self.dropout = keras.layers.Dropout(dropout)
+ self.head_dim = embed_dim // num_heads
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
+
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ key_value_states: tf.Tensor | None = None,
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
+ attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
+ key_states = tf.reshape(key_states, proj_shape)
+ value_states = tf.reshape(value_states, proj_shape)
+
+ src_len = shape_list(key_states)[1]
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_weights),
+ [bsz * self.num_heads, tgt_len, src_len],
+ message=(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {shape_list(attn_weights)}"
+ ),
+ )
+
+ if attention_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attention_mask),
+ [bsz, 1, tgt_len, src_len],
+ message=(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {shape_list(attention_mask)}"
+ ),
+ )
+
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_weights = stable_softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
+ )
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_probs = self.dropout(attn_weights, training=training)
+ attn_output = tf.matmul(attn_probs, value_states)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output),
+ [bsz * self.num_heads, tgt_len, self.head_dim],
+ message=(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {shape_list(attn_output)}"
+ ),
+ )
+
+ attn_output = tf.transpose(
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
+ )
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
+
+ return attn_output, attn_weights, past_key_value
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
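+# Shape sketch (illustrative) for TFSpeech2TextAttention.call: hidden_states of shape
+# (bsz, tgt_len, embed_dim) yields attn_output (bsz, tgt_len, embed_dim),
+# attn_weights (bsz, num_heads, tgt_len, src_len), and, in decoder layers, a
+# past_key_value pair of tensors shaped (bsz, num_heads, src_len, head_dim).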
+
+class TFSpeech2TextEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFSpeech2TextAttention(
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
+ )
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False
+ ):
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, self_attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ training=training,
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(hidden_states),
+ shape_list(residual),
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
+ )
+
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return hidden_states, self_attn_weights
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+class TFSpeech2TextDecoderLayer(keras.layers.Layer):
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+
+ self.self_attn = TFSpeech2TextAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="self_attn",
+ is_decoder=True,
+ )
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.encoder_attn = TFSpeech2TextAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="encoder_attn",
+ is_decoder=True,
+ )
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask: tf.Tensor | None = None,
+ encoder_hidden_states: tf.Tensor | None = None,
+ encoder_attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
+ past_key_value: Tuple[tf.Tensor] | None = None,
+ training=False,
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`tf.Tensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ `(decoder_attention_heads,)`
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
+ `(decoder_attention_heads,)`
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ training=training,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ training=training,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return (
+ hidden_states,
+ self_attn_weights,
+ cross_attn_weights,
+ present_key_value,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "encoder_attn", None) is not None:
+ with tf.name_scope(self.encoder_attn.name):
+ self.encoder_attn.build(None)
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+class TFSpeech2TextPreTrainedModel(TFPreTrainedModel):
+ config_class = Speech2TextConfig
+ base_model_prefix = "model"
+ main_input_name = "input_features"
+ _keys_to_ignore_on_load_unexpected = [r"encoder.embed_positions.weights"]
+
+ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
+ """
+ Computes the output length of the convolutional layers
+ """
+ for _ in range(self.config.num_conv_layers):
+ input_lengths = (input_lengths - 1) // 2 + 1
+
+ return input_lengths
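+
+    # Worked example (illustrative): with num_conv_layers=2, an input of length 100
+    # shrinks to (100 - 1) // 2 + 1 = 50 after the first layer and then to
+    # (50 - 1) // 2 + 1 = 25 after the second.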
+
+ @property
+ def input_signature(self):
+ return {
+ "input_features": tf.TensorSpec(
+ (None, None, self.config.input_feat_per_channel * self.config.input_channels),
+ tf.float32,
+ name="input_features",
+ ),
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
+ "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
+ "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
+ }
+
+
+SPEECH_TO_TEXT_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`Speech2TextConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_features (`tf.Tensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
+ by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
+ via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
+ [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a
+ tensor of floats. See [`~Speech2TextFeatureExtractor.__call__`]
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+            For training, `decoder_input_ids` should be provided. If no `decoder_input_ids` are provided, the model
+            creates this tensor by shifting `labels` to the right (see `shift_tokens_right`).
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            If not provided, a mask that ignores pad tokens is created by default. It is not recommended to set this
+            for most use cases.
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tf.FloatTensor`, *optional*):
+            Sequence of hidden states at the output of the last layer of the encoder, of shape `(batch_size,
+            sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
+        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ decoder_inputs_embeds (`tf.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@keras_serializable
+class TFSpeech2TextEncoder(keras.layers.Layer):
+ config_class = Speech2TextConfig
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`TFSpeech2TextEncoderLayer`].
+
+ Args:
+ config: Speech2TextConfig
+ """
+
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_source_positions
+ self.embed_scale = tf.math.sqrt(float(embed_dim)) if config.scale_embedding else 1.0
+
+ self.conv = TFConv1dSubsampler(config, name="conv")
+
+ self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
+ num_positions=config.max_source_positions,
+ embedding_dim=embed_dim,
+ padding_idx=self.padding_idx,
+ name="embed_positions",
+ )
+ self.layers = [TFSpeech2TextEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+
+ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
+ """
+ Computes the output length of the convolutional layers
+ """
+ for _ in range(self.config.num_conv_layers):
+ input_lengths = (input_lengths - 1) // 2 + 1
+
+ return input_lengths
+
+ def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
+        # generate() creates a 3D attention mask because of the shape of input_features;
+        # convert it to 2D if that's the case
+ if len(attention_mask.shape) > 2:
+ attention_mask = attention_mask[:, :, -1]
+
+ subsampled_lengths = self._get_feat_extract_output_lengths(tf.math.reduce_sum(attention_mask, -1))
+ bsz = shape_list(attention_mask)[0]
+ indices = tf.concat(
+ (
+ tf.expand_dims(tf.range(bsz, dtype=attention_mask.dtype), -1),
+ tf.expand_dims(subsampled_lengths - 1, -1),
+ ),
+ axis=-1,
+ )
+ attention_mask = tf.scatter_nd(indices=indices, updates=tf.ones(bsz), shape=[bsz, feature_vector_length])
+ attention_mask = tf.cast(tf.reverse(tf.math.cumsum(tf.reverse(attention_mask, [-1]), -1), [-1]), tf.int64)
+ return attention_mask
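+
+    # Illustrative sketch (not executed): with feature_vector_length=5 and a single
+    # sample whose subsampled length is 3, a one is scattered at index 2 and the
+    # reversed cumulative sum turns [0, 0, 1, 0, 0] into the mask [1, 1, 1, 0, 0].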
+
+ @unpack_inputs
+ def call(
+ self,
+ input_features=None,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ """
+ Args:
+ input_features (`tf.Tensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
+ obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
+ `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
+ `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
+ padding and conversion into a tensor of floats. See [`~Speech2TextFeatureExtractor.__call__`]
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+            head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ if input_features is None:
+ raise ValueError("You have to specify input_features")
+
+ inputs_embeds = self.conv(input_features)
+ inputs_embeds = self.embed_scale * inputs_embeds
+
+ # subsample attention mask if necessary
+ if attention_mask is not None:
+ attention_mask = self._get_feature_vector_attention_mask(tf.shape(inputs_embeds)[1], attention_mask)
+ padding_mask = tf.cast(tf.math.not_equal(attention_mask, 1), tf.int64)
+ else:
+ padding_mask = tf.zeros(tf.shape(inputs_embeds)[:-1], dtype=tf.int64)
+
+ embed_pos = self.embed_positions(padding_mask)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # check attention mask and invert
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _expand_mask(attention_mask)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(head_mask)[0],
+ len(self.layers),
+ message=(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(head_mask)[0]}."
+ ),
+ )
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if training and (dropout_probability < self.layerdrop): # skip the layer
+ continue
+
+ hidden_states, attn = encoder_layer(
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ training=training,
+ )
+
+ if output_attentions:
+ all_attentions += (attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv", None) is not None:
+ with tf.name_scope(self.conv.name):
+ self.conv.build(None)
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFSpeech2TextDecoder(keras.layers.Layer):
+ config_class = Speech2TextConfig
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFSpeech2TextDecoderLayer`]
+
+ Args:
+ config: Speech2TextConfig
+ """
+
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_target_positions
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+
+ self.embed_tokens = TFSharedEmbeddings(config.vocab_size, config.d_model, name="embed_tokens")
+
+ self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
+ num_positions=config.max_target_positions,
+ embedding_dim=config.d_model,
+ padding_idx=self.padding_idx,
+ name="embed_positions",
+ )
+
+ self.layers = [TFSpeech2TextDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+
+ self.dropout = keras.layers.Dropout(config.dropout)
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ inputs_embeds=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
+ decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
+
+        if inputs_embeds is None:
+            check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size)
+            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ if input_shape[-1] > 1:
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
+ else:
+ combined_attention_mask = _expand_mask(
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
+ )
+
+ if attention_mask is not None:
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
+
+ # embed positions
+ positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
+
+ hidden_states = inputs_embeds + positions
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
+ if attn_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attn_mask)[0],
+ len(self.layers),
+ message=(
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(attn_mask)[0]}."
+ ),
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ dropout_probability = random.uniform(0, 1)
+ if training and (dropout_probability < self.layerdrop):
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+ cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
+ hidden_states,
+ attention_mask=combined_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=past_key_value,
+ )
+
+ if use_cache:
+ next_decoder_cache += (present_key_value,)
+
+ if output_attentions:
+ all_self_attns += (layer_self_attn,)
+
+ if encoder_hidden_states is not None:
+ all_cross_attns += (layer_cross_attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+
+ if not return_dict:
+ return hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attns
+ else:
+ return TFBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attns,
+ )
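+
+ # A minimal sketch (hypothetical shapes) of the attention-mask combination in
+ # `call` above: a causal mask is built whenever the target length is greater
+ # than 1, and any user-provided padding mask is added on top, so padded
+ # positions keep a large negative score.
+ #
+ # input_shape = (2, 5) # (batch, tgt_len)
+ # causal = _make_causal_mask(input_shape, past_key_values_length=0)
+ # padding = _expand_mask(attention_mask, tgt_len=5)
+ # combined = causal + padding # broadcast to [bsz, 1, tgt_len, src_len]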
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_tokens", None) is not None:
+ with tf.name_scope(self.embed_tokens.name):
+ self.embed_tokens.build(None)
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFSpeech2TextMainLayer(keras.layers.Layer):
+ config_class = Speech2TextConfig
+
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+
+ self.encoder = TFSpeech2TextEncoder(config, name="encoder")
+ self.decoder = TFSpeech2TextDecoder(config, name="decoder")
+
+ def get_input_embeddings(self):
+ return self.decoder.embed_tokens
+
+ def set_input_embeddings(self, new_embeddings):
+ self.decoder.embed_tokens = new_embeddings
+
+ @unpack_inputs
+ def call(
+ self,
+ input_features=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ encoder_outputs=None,
+ past_key_values=None,
+ decoder_inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ **kwargs,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
+ encoder_outputs = TFBaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
+ encoder_outputs = encoder_outputs.to_tuple()
+
+ # downsample encoder attention mask
+ if attention_mask is not None:
+ encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
+ tf.shape(encoder_outputs[0])[1], attention_mask
+ )
+ else:
+ encoder_attention_mask = None
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
+
+
+@add_start_docstrings(
+ "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
+ SPEECH_TO_TEXT_START_DOCSTRING,
+)
+class TFSpeech2TextModel(TFSpeech2TextPreTrainedModel):
+ def __init__(self, config: Speech2TextConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.model = TFSpeech2TextMainLayer(config, name="model")
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSeq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_features: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ **kwargs,
+ ) -> Union[Tuple, TFSeq2SeqModelOutput]:
+ outputs = self.model(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=output.last_hidden_state,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+
+
+@add_start_docstrings(
+ "The Speech2Text Model with a language modeling head. Can be used for summarization.",
+ SPEECH_TO_TEXT_START_DOCSTRING,
+)
+class TFSpeech2TextForConditionalGeneration(TFSpeech2TextPreTrainedModel, TFCausalLanguageModelingLoss):
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+ self.model = TFSpeech2TextMainLayer(config, name="model")
+ self.lm_head = keras.layers.Dense(self.config.vocab_size, use_bias=False, name="lm_head")
+ # TODO (Joao): investigate why Speech2Text has numerical issues in XLA generate
+ self.supports_xla_generation = False
+ self.config = config
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ def resize_token_embeddings(self, new_num_tokens: int) -> tf.Variable:
+ new_embeddings = super().resize_token_embeddings(new_num_tokens)
+ return new_embeddings
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_features: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[Tuple, TFSeq2SeqLMOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import Speech2TextProcessor, TFSpeech2TextForConditionalGeneration
+ >>> from datasets import load_dataset
+ >>> import soundfile as sf
+
+ >>> model = TFSpeech2TextForConditionalGeneration.from_pretrained(
+ ... "facebook/s2t-small-librispeech-asr", from_pt=True
+ ... )
+ >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
+
+
+ >>> def map_to_array(batch):
+ ... speech, _ = sf.read(batch["file"])
+ ... batch["speech"] = speech
+ ... return batch
+
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> ds = ds.map(map_to_array)
+ >>> ds.set_format(type="tf")
+
+ >>> input_features = processor(
+ ... ds["speech"][0], sampling_rate=16000, return_tensors="tf"
+ ... ).input_features # Batch size 1
+ >>> generated_ids = model.generate(input_features)
+
+ >>> transcription = processor.batch_decode(generated_ids)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ lm_logits = self.lm_head(outputs[0])
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return TFSeq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
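+ # A minimal sketch (hypothetical ids) of the label shifting in `call` above
+ # when `labels` are passed without `decoder_input_ids`:
+ #
+ # labels = [[ 7, 9, -100]]
+ # decoder_input_ids = [[start, 7, 9]]
+ #
+ # `shift_tokens_right` prepends `decoder_start_token_id`, drops the last
+ # position, and replaces any remaining -100 with `pad_token_id`.
+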
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqLMOutput(
+ logits=output.logits,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_features": None, # needs to be passed to make Keras.layer.__call__ happy
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build([None, None, self.config.d_model])
+
+ def tf_to_pt_weight_rename(self, tf_weight):
+ if tf_weight == "lm_head.weight":
+ return tf_weight, "model.decoder.embed_tokens.weight"
+ else:
+ return (tf_weight,)
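+
+
+# A minimal, self-contained sketch (not part of the model) of the cache-aware
+# trimming done in `prepare_inputs_for_generation` above: once `past_key_values`
+# is populated, only the newest decoder token needs to be re-embedded.
+if __name__ == "__main__":
+ import numpy as np
+
+ decoder_input_ids = np.array([[2, 14, 27, 31]]) # hypothetical token ids
+ past_key_values = object() # stands in for the real cache tuples
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+ print(decoder_input_ids) # [[31]] -- only the last token is fed back in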
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/processing_speech_to_text.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/processing_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..42e900633867b3d83be4238c548932ae582aa623
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/processing_speech_to_text.py
@@ -0,0 +1,116 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Speech processor class for Speech2Text
+"""
+import warnings
+from contextlib import contextmanager
+
+from ...processing_utils import ProcessorMixin
+
+
+class Speech2TextProcessor(ProcessorMixin):
+ r"""
+ Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a
+ single processor.
+
+ [`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and
+ [`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more
+ information.
+
+ Args:
+ feature_extractor (`Speech2TextFeatureExtractor`):
+ An instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input.
+ tokenizer (`Speech2TextTokenizer`):
+ An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input.
+ """
+
+ feature_extractor_class = "Speech2TextFeatureExtractor"
+ tokenizer_class = "Speech2TextTokenizer"
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+ self.current_processor = self.feature_extractor
+ self._in_target_context_manager = False
+
+ def __call__(self, *args, **kwargs):
+ """
+ When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor's
+ [`~Speech2TextFeatureExtractor.__call__`] and returns its output. If used in the context
+ [`~Speech2TextProcessor.as_target_processor`] this method forwards all its arguments to Speech2TextTokenizer's
+ [`~Speech2TextTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
+ information.
+ """
+ # For backward compatibility
+ if self._in_target_context_manager:
+ return self.current_processor(*args, **kwargs)
+
+ if "raw_speech" in kwargs:
+ warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
+ audio = kwargs.pop("raw_speech")
+ else:
+ audio = kwargs.pop("audio", None)
+ sampling_rate = kwargs.pop("sampling_rate", None)
+ text = kwargs.pop("text", None)
+ if len(args) > 0:
+ audio = args[0]
+ args = args[1:]
+
+ if audio is None and text is None:
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
+
+ if audio is not None:
+ inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
+ if text is not None:
+ encodings = self.tokenizer(text, **kwargs)
+
+ if text is None:
+ return inputs
+ elif audio is None:
+ return encodings
+ else:
+ inputs["labels"] = encodings["input_ids"]
+ return inputs
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @contextmanager
+ def as_target_processor(self):
+ """
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
+ Speech2Text.
+ """
+ warnings.warn(
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
+ "your audio inputs, or in a separate call."
+ )
+ self._in_target_context_manager = True
+ self.current_processor = self.tokenizer
+ yield
+ self.current_processor = self.feature_extractor
+ self._in_target_context_manager = False
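+
+
+# A minimal usage sketch (assumes network access to the Hub and the public
+# "facebook/s2t-small-librispeech-asr" checkpoint): passing `audio` and `text`
+# in one call yields model inputs with a `labels` field, which replaces the
+# deprecated `as_target_processor` context manager.
+if __name__ == "__main__":
+ import numpy as np
+
+ processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
+ dummy_audio = np.zeros(16000, dtype=np.float32) # one second of silence
+ inputs = processor(audio=dummy_audio, sampling_rate=16000, text="hello world")
+ print(sorted(inputs.keys())) # e.g. ['attention_mask', 'input_features', 'labels']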
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/tokenization_speech_to_text.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/tokenization_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..27db0a671ebc7d251f77a11ff88969921d1ccc7c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/speech_to_text/tokenization_speech_to_text.py
@@ -0,0 +1,289 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for Speech2Text."""
+import json
+import os
+from pathlib import Path
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import sentencepiece
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SPIECE_UNDERLINE = "▁"
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "spm_file": "sentencepiece.bpe.model",
+}
+
+
+MAX_MODEL_INPUT_SIZES = {
+ "facebook/s2t-small-librispeech-asr": 1024,
+}
+
+MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
+
+LANGUAGES = {"mustc": MUSTC_LANGS}
+
+
+class Speech2TextTokenizer(PreTrainedTokenizer):
+ """
+ Construct a Speech2Text tokenizer.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to
+ the superclass for more information regarding such methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ spm_file (`str`):
+ Path to the [SentencePiece](https://github.com/google/sentencepiece) model file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sentence token.
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sentence token.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ do_upper_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to uppercase the output when decoding.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+ tgt_lang (`str`, *optional*):
+ A string representing the target language.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ **kwargs
+ Additional keyword arguments passed along to [`PreTrainedTokenizer`].
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ prefix_tokens: List[int] = []
+
+ def __init__(
+ self,
+ vocab_file,
+ spm_file,
+ bos_token="",
+ eos_token="",
+ pad_token="",
+ unk_token="",
+ do_upper_case=False,
+ do_lower_case=False,
+ tgt_lang=None,
+ lang_codes=None,
+ additional_special_tokens=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.do_upper_case = do_upper_case
+ self.do_lower_case = do_lower_case
+
+ self.encoder = load_json(vocab_file)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.spm_file = spm_file
+ self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
+
+ if lang_codes is not None:
+ self.lang_codes = lang_codes
+ self.langs = LANGUAGES[lang_codes]
+ self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
+ self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
+ if additional_special_tokens is not None:
+ additional_special_tokens = self.lang_tokens + additional_special_tokens
+ else:
+ additional_special_tokens = self.lang_tokens
+ self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
+
+ self.set_tgt_lang_special_tokens(self._tgt_lang)
+ else:
+ self.lang_code_to_id = {}
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ do_upper_case=do_upper_case,
+ do_lower_case=do_lower_case,
+ tgt_lang=tgt_lang,
+ lang_codes=lang_codes,
+ sp_model_kwargs=self.sp_model_kwargs,
+ additional_special_tokens=additional_special_tokens,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self) -> int:
+ return len(self.encoder)
+
+ def get_vocab(self) -> Dict:
+ vocab = self.encoder.copy()
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ @property
+ def tgt_lang(self) -> str:
+ return self._tgt_lang
+
+ @tgt_lang.setter
+ def tgt_lang(self, new_tgt_lang) -> None:
+ self._tgt_lang = new_tgt_lang
+ self.set_tgt_lang_special_tokens(new_tgt_lang)
+
+ def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
+ """Reset the special tokens to the target language setting. prefix=[eos, tgt_lang_code] and suffix=[eos]."""
+ lang_code_id = self.lang_code_to_id[tgt_lang]
+ self.prefix_tokens = [lang_code_id]
+
+ def _tokenize(self, text: str) -> List[str]:
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ return self.encoder.get(token, self.encoder[self.unk_token])
+
+ def _convert_id_to_token(self, index: int) -> str:
+ """Converts an index (integer) in a token (str) using the decoder."""
+ return self.decoder.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ for token in tokens:
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ decoded = self.sp_model.decode(current_sub_tokens)
+ out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ decoded = self.sp_model.decode(current_sub_tokens)
+ out_string += decoded.upper() if self.do_upper_case else decoded
+ return out_string.strip()
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
+ """Build model inputs from a sequence by appending eos_token_id."""
+ if token_ids_1 is None:
+ return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
+ # We don't expect to process pairs, but leave the pair logic for API consistency
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ prefix_ones = [1] * len(self.prefix_tokens)
+ suffix_ones = [1]
+ if token_ids_1 is None:
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
+
+ def __getstate__(self) -> Dict:
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d: Dict) -> None:
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ save_dir = Path(save_directory)
+ assert save_dir.is_dir(), f"{save_directory} should be a directory"
+ vocab_save_path = save_dir / (
+ (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
+ )
+ spm_save_path = save_dir / (
+ (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
+ )
+
+ save_json(self.encoder, vocab_save_path)
+
+ if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
+ copyfile(self.spm_file, spm_save_path)
+ elif not os.path.isfile(self.spm_file):
+ with open(spm_save_path, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (str(vocab_save_path), str(spm_save_path))
+
+
+def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
+ spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
+ spm.Load(str(path))
+ return spm
+
+
+def load_json(path: str) -> Union[Dict, List]:
+ with open(path, "r") as f:
+ return json.load(f)
+
+
+def save_json(data, path: str) -> None:
+ with open(path, "w") as f:
+ json.dump(data, f, indent=2)
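+
+
+# A minimal sketch (hypothetical ids) of the layout produced by
+# `build_inputs_with_special_tokens` above: the target-language code id, if any,
+# is prepended via `prefix_tokens` and `eos_token_id` is appended.
+if __name__ == "__main__":
+ prefix_tokens = [10] # e.g. the id of "<lang:de>"
+ eos_token_id = 2
+ token_ids_0 = [57, 89, 31] # ids from `_tokenize` + `_convert_token_to_id`
+ print(prefix_tokens + token_ids_0 + [eos_token_id]) # [10, 57, 89, 31, 2]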
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..346bc9ef9caaa6412a5402016b9ed9bfec48c04b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_table_transformer": [
+ "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "TableTransformerConfig",
+ "TableTransformerOnnxConfig",
+ ]
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_table_transformer"] = [
+ "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TableTransformerForObjectDetection",
+ "TableTransformerModel",
+ "TableTransformerPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_table_transformer import (
+ TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ TableTransformerConfig,
+ TableTransformerOnnxConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_table_transformer import (
+ TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TableTransformerForObjectDetection,
+ TableTransformerModel,
+ TableTransformerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
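+
+# A minimal sketch of what the lazy module provides at runtime (the modeling
+# classes additionally require torch, per the `is_torch_available()` guard):
+#
+# from transformers.models.table_transformer import TableTransformerConfig
+# config = TableTransformerConfig() # resolved on first attribute access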
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c7eb4713705c69d56dfd39b8b37bd02738c4afb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/configuration_table_transformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/configuration_table_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf07100960b285025863082ad693c61603c989e7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/configuration_table_transformer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/convert_table_transformer_to_hf.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/convert_table_transformer_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92514cbb60dc241002e4d74a763db0e00fd56226
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/convert_table_transformer_to_hf.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/convert_table_transformer_to_hf_no_timm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/convert_table_transformer_to_hf_no_timm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e44d9f9de6116542fda85ddb658739cf4cc8796e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/convert_table_transformer_to_hf_no_timm.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/modeling_table_transformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/modeling_table_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9732d06bc54a8c4deac1d3f6982c11971e59489d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/__pycache__/modeling_table_transformer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/configuration_table_transformer.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/configuration_table_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a2ff6bbab3b24c7b45f0b2ca8c58af70e560ba3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/configuration_table_transformer.py
@@ -0,0 +1,273 @@
+# coding=utf-8
+# Copyright The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Table Transformer model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+from ..auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TableTransformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`TableTransformerModel`]. It is used to
+ instantiate a Table Transformer model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the Table Transformer
+ [microsoft/table-transformer-detection](https://huggingface.co/microsoft/table-transformer-detection) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ use_timm_backbone (`bool`, *optional*, defaults to `True`):
+ Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
+ API.
+ backbone_config (`PretrainedConfig` or `dict`, *optional*):
+ The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False`, in which
+ case it will default to `ResNetConfig()`.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ num_queries (`int`, *optional*, defaults to 100):
+ Number of object queries, i.e. detection slots. This is the maximal number of objects
+ [`TableTransformerModel`] can detect in a single image. For COCO, we recommend 100 queries.
+ d_model (`int`, *optional*, defaults to 256):
+ Dimension of the layers.
+ encoder_layers (`int`, *optional*, defaults to 6):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 6):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ init_xavier_std (`float`, *optional*, defaults to 1):
+ The scaling factor used for the Xavier initialization gain in the HM Attention map module.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ auxiliary_loss (`bool`, *optional*, defaults to `False`):
+ Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
+ position_embedding_type (`str`, *optional*, defaults to `"sine"`):
+ Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
+ backbone (`str`, *optional*):
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
+ use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
+ Whether to use pretrained weights for the backbone.
+ backbone_kwargs (`dict`, *optional*):
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
+ dilation (`bool`, *optional*, defaults to `False`):
+ Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
+ `use_timm_backbone` = `True`.
+ class_cost (`float`, *optional*, defaults to 1):
+ Relative weight of the classification error in the Hungarian matching cost.
+ bbox_cost (`float`, *optional*, defaults to 5):
+ Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
+ giou_cost (`float`, *optional*, defaults to 2):
+ Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
+ mask_loss_coefficient (`float`, *optional*, defaults to 1):
+ Relative weight of the Focal loss in the panoptic segmentation loss.
+ dice_loss_coefficient (`float`, *optional*, defaults to 1):
+ Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
+ bbox_loss_coefficient (`float`, *optional*, defaults to 5):
+ Relative weight of the L1 bounding box loss in the object detection loss.
+ giou_loss_coefficient (`float`, *optional*, defaults to 2):
+ Relative weight of the generalized IoU loss in the object detection loss.
+ eos_coefficient (`float`, *optional*, defaults to 0.1):
+ Relative classification weight of the 'no-object' class in the object detection loss.
+
+ Examples:
+
+ ```python
+ >>> from transformers import TableTransformerModel, TableTransformerConfig
+
+ >>> # Initializing a Table Transformer microsoft/table-transformer-detection style configuration
+ >>> configuration = TableTransformerConfig()
+
+ >>> # Initializing a model from the microsoft/table-transformer-detection style configuration
+ >>> model = TableTransformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "table-transformer"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "encoder_attention_heads",
+ }
+
+ # Copied from transformers.models.detr.configuration_detr.DetrConfig.__init__
+ def __init__(
+ self,
+ use_timm_backbone=True,
+ backbone_config=None,
+ num_channels=3,
+ num_queries=100,
+ encoder_layers=6,
+ encoder_ffn_dim=2048,
+ encoder_attention_heads=8,
+ decoder_layers=6,
+ decoder_ffn_dim=2048,
+ decoder_attention_heads=8,
+ encoder_layerdrop=0.0,
+ decoder_layerdrop=0.0,
+ is_encoder_decoder=True,
+ activation_function="relu",
+ d_model=256,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ init_std=0.02,
+ init_xavier_std=1.0,
+ auxiliary_loss=False,
+ position_embedding_type="sine",
+ backbone="resnet50",
+ use_pretrained_backbone=True,
+ backbone_kwargs=None,
+ dilation=False,
+ class_cost=1,
+ bbox_cost=5,
+ giou_cost=2,
+ mask_loss_coefficient=1,
+ dice_loss_coefficient=1,
+ bbox_loss_coefficient=5,
+ giou_loss_coefficient=2,
+ eos_coefficient=0.1,
+ **kwargs,
+ ):
+ if not use_timm_backbone and use_pretrained_backbone:
+ raise ValueError(
+ "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`"
+ )
+
+ if backbone_config is not None and backbone is not None:
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
+
+ if backbone_config is not None and use_timm_backbone:
+ raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
+
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
+
+ if not use_timm_backbone:
+ if backbone_config is None:
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
+ backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
+ elif isinstance(backbone_config, dict):
+ backbone_model_type = backbone_config.get("model_type")
+ config_class = CONFIG_MAPPING[backbone_model_type]
+ backbone_config = config_class.from_dict(backbone_config)
+ # set timm attributes to None
+ dilation, backbone, use_pretrained_backbone = None, None, None
+
+ self.use_timm_backbone = use_timm_backbone
+ self.backbone_config = backbone_config
+ self.num_channels = num_channels
+ self.num_queries = num_queries
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.init_xavier_std = init_xavier_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.num_hidden_layers = encoder_layers
+ self.auxiliary_loss = auxiliary_loss
+ self.position_embedding_type = position_embedding_type
+ self.backbone = backbone
+ self.use_pretrained_backbone = use_pretrained_backbone
+ self.backbone_kwargs = backbone_kwargs
+ self.dilation = dilation
+ # Hungarian matcher
+ self.class_cost = class_cost
+ self.bbox_cost = bbox_cost
+ self.giou_cost = giou_cost
+ # Loss coefficients
+ self.mask_loss_coefficient = mask_loss_coefficient
+ self.dice_loss_coefficient = dice_loss_coefficient
+ self.bbox_loss_coefficient = bbox_loss_coefficient
+ self.giou_loss_coefficient = giou_loss_coefficient
+ self.eos_coefficient = eos_coefficient
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+ @property
+ def num_attention_heads(self) -> int:
+ return self.encoder_attention_heads
+
+ @property
+ def hidden_size(self) -> int:
+ return self.d_model
+
+
+# Copied from transformers.models.detr.configuration_detr.DetrOnnxConfig
+class TableTransformerOnnxConfig(OnnxConfig):
+ torch_onnx_minimum_version = version.parse("1.11")
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ("pixel_mask", {0: "batch"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-5
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 12
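+
+
+# A minimal sketch of how the ONNX export properties above are consumed: the
+# dynamic-axis mapping names the dimensions an exporter should leave symbolic.
+if __name__ == "__main__":
+ onnx_config = TableTransformerOnnxConfig(TableTransformerConfig())
+ print(onnx_config.inputs["pixel_values"]) # {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}
+ print(onnx_config.atol_for_validation) # 1e-05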
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/convert_table_transformer_to_hf.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/convert_table_transformer_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..d06c3eb26b616929bf7a9f0c8b2fe7f7ac89dbe9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/convert_table_transformer_to_hf.py
@@ -0,0 +1,318 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Table Transformer checkpoints with timm-backbone.
+
+URL: https://github.com/microsoft/table-transformer
+"""
+
+
+import argparse
+from collections import OrderedDict
+from pathlib import Path
+
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+from torchvision.transforms import functional as F
+
+from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+rename_keys = []
+for i in range(6):
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append(
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
+ )
+ rename_keys.append(
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
+ )
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
+ rename_keys.append(
+ (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
+ )
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
+ # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
+ )
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
+ )
+ rename_keys.append(
+ (
+ f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
+ f"decoder.layers.{i}.encoder_attn.out_proj.weight",
+ )
+ )
+ rename_keys.append(
+ (
+ f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
+ f"decoder.layers.{i}.encoder_attn.out_proj.bias",
+ )
+ )
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
+ )
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
+ )
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
+ )
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
+
+# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
+rename_keys.extend(
+ [
+ ("input_proj.weight", "input_projection.weight"),
+ ("input_proj.bias", "input_projection.bias"),
+ ("query_embed.weight", "query_position_embeddings.weight"),
+ ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
+ ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
+ ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
+ ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
+ ("class_embed.weight", "class_labels_classifier.weight"),
+ ("class_embed.bias", "class_labels_classifier.bias"),
+ ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
+ ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
+ ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
+ ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
+ ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
+ ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
+ ]
+)
+
+
+def rename_key(state_dict, old, new):
+ val = state_dict.pop(old)
+ state_dict[new] = val
+
+
+def rename_backbone_keys(state_dict):
+ new_state_dict = OrderedDict()
+ for key, value in state_dict.items():
+ if "backbone.0.body" in key:
+ new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
+ new_state_dict[new_key] = value
+ else:
+ new_state_dict[key] = value
+
+ return new_state_dict
+
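+# A minimal sketch (hypothetical one-entry state dict) of the helpers above:
+#
+# sd = {"input_proj.weight": 0}
+# rename_key(sd, "input_proj.weight", "input_projection.weight")
+# assert sd == {"input_projection.weight": 0}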
+
+def read_in_q_k_v(state_dict):
+ prefix = ""
+
+ # first: transformer encoder
+ for i in range(6):
+ # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
+ # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
+ for i in range(6):
+ # read in weights + bias of input projection layer of self-attention
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
+ # read in weights + bias of input projection layer of cross-attention
+ in_proj_weight_cross_attn = state_dict.pop(
+ f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
+ )
+ in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
+ # next, add query, keys and values (in that order) of cross-attention to the state dict
+ state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
+ state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
+ state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
+ state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
+ state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
+ state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
+
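+# The 256-sized slices above assume d_model = 256: PyTorch's nn.MultiheadAttention
+# stores the fused input projection as a (3 * 256, 256) weight and a (3 * 256,)
+# bias, stacked in query/key/value order. A hypothetical standalone split:
+#
+#     in_proj_weight = torch.randn(768, 256)
+#     q_w, k_w, v_w = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[512:]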
+
+def resize(image, checkpoint_url):
+ width, height = image.size
+ current_max_size = max(width, height)
+ target_max_size = 800 if "detection" in checkpoint_url else 1000
+ scale = target_max_size / current_max_size
+ resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
+
+ return resized_image
+
+
+def normalize(image):
+ image = F.to_tensor(image)
+ image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ return image
+
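+# Sketch of how resize and normalize are chained below to build the model input
+# (mirrors the call in convert_table_transformer_checkpoint; the file name is
+# hypothetical):
+#
+#     image = Image.open("table.png").convert("RGB")
+#     pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)  # (1, 3, H, W)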
+
+@torch.no_grad()
+def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
+ """
+ Copy/paste/tweak model's weights to our DETR structure.
+ """
+
+ logger.info("Converting model...")
+
+ # load original state dict
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
+ # rename keys
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ state_dict = rename_backbone_keys(state_dict)
+ # query, key and value matrices need special treatment
+ read_in_q_k_v(state_dict)
+ # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
+ prefix = "model."
+ for key in state_dict.copy().keys():
+ if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
+ val = state_dict.pop(key)
+ state_dict[prefix + key] = val
+ # create HuggingFace model and load state dict
+ config = TableTransformerConfig(
+ backbone="resnet18",
+ mask_loss_coefficient=1,
+ dice_loss_coefficient=1,
+ ce_loss_coefficient=1,
+ bbox_loss_coefficient=5,
+ giou_loss_coefficient=2,
+ eos_coefficient=0.4,
+ class_cost=1,
+ bbox_cost=5,
+ giou_cost=2,
+ )
+
+ if "detection" in checkpoint_url:
+ config.num_queries = 15
+ config.num_labels = 2
+ id2label = {0: "table", 1: "table rotated"}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ else:
+ config.num_queries = 125
+ config.num_labels = 6
+ id2label = {
+ 0: "table",
+ 1: "table column",
+ 2: "table row",
+ 3: "table column header",
+ 4: "table projected row header",
+ 5: "table spanning cell",
+ }
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ image_processor = DetrImageProcessor(
+ format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
+ )
+ model = TableTransformerForObjectDetection(config)
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ # verify our conversion
+ filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
+ file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
+ image = Image.open(file_path).convert("RGB")
+ pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
+
+ outputs = model(pixel_values)
+
+ if "detection" in checkpoint_url:
+ expected_shape = (1, 15, 3)
+ expected_logits = torch.tensor(
+ [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
+ )
+ expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
+
+ else:
+ expected_shape = (1, 125, 7)
+ expected_logits = torch.tensor(
+ [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
+ )
+ expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
+
+ assert outputs.logits.shape == expected_shape
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
+ assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
+ print("Looks ok!")
+
+ if pytorch_dump_folder_path is not None:
+ # Save model and image processor
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ model.save_pretrained(pytorch_dump_folder_path)
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ # Push model to HF hub
+ logger.info("Pushing model to the hub...")
+ model_name = (
+ "microsoft/table-transformer-detection"
+ if "detection" in checkpoint_url
+ else "microsoft/table-transformer-structure-recognition"
+ )
+ model.push_to_hub(model_name)
+ image_processor.push_to_hub(model_name)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
+ type=str,
+ choices=[
+ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
+ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
+ ],
+ help="URL of the Table Transformer checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ )
+ args = parser.parse_args()
+ convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a2b7b87fe972a4c79a4c573b52164eb7e01d0ad
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/convert_table_transformer_to_hf_no_timm.py
@@ -0,0 +1,435 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Table Transformer checkpoints with native (Transformers) backbone.
+
+URL: https://github.com/microsoft/table-transformer
+"""
+
+
+import argparse
+from pathlib import Path
+
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+from torchvision.transforms import functional as F
+
+from transformers import DetrImageProcessor, ResNetConfig, TableTransformerConfig, TableTransformerForObjectDetection
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def create_rename_keys(config):
+ # here we list all keys to be renamed (original name on the left, our name on the right)
+ rename_keys = []
+
+ # stem
+ # fmt: off
+ rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
+ rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
+ rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
+ rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
+ rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
+ # stages
+ for stage_idx in range(len(config.backbone_config.depths)):
+ for layer_idx in range(config.backbone_config.depths[stage_idx]):
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv1.weight",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.convolution.weight",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.weight",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.weight",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.bias",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.bias",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.running_mean",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.running_mean",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn1.running_var",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.0.normalization.running_var",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv2.weight",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.convolution.weight",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.weight",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.weight",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.bias",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.bias",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.running_mean",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.running_mean",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn2.running_var",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.1.normalization.running_var",
+ )
+ )
+ # all ResNet stages except the first one have a downsample as first layer
+ if stage_idx != 0 and layer_idx == 0:
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
+ )
+ )
+ rename_keys.append(
+ (
+ f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
+ f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
+ )
+ )
+ # fmt: on
+
+ for i in range(config.encoder_layers):
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append(
+ (
+ f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
+ f"encoder.layers.{i}.self_attn.out_proj.weight",
+ )
+ )
+ rename_keys.append(
+ (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
+ )
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
+ rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
+ rename_keys.append(
+ (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
+ )
+ rename_keys.append(
+ (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")
+ )
+ rename_keys.append(
+ (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")
+ )
+ rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
+ # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
+ rename_keys.append(
+ (
+ f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
+ f"decoder.layers.{i}.self_attn.out_proj.weight",
+ )
+ )
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
+ )
+ rename_keys.append(
+ (
+ f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
+ f"decoder.layers.{i}.encoder_attn.out_proj.weight",
+ )
+ )
+ rename_keys.append(
+ (
+ f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
+ f"decoder.layers.{i}.encoder_attn.out_proj.bias",
+ )
+ )
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
+ rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
+ )
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")
+ )
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
+ )
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
+ )
+ rename_keys.append(
+ (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")
+ )
+ rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
+
+    # convolutional projection + query embeddings + layernorms of encoder and decoder + class and bounding box heads
+ rename_keys.extend(
+ [
+ ("input_proj.weight", "input_projection.weight"),
+ ("input_proj.bias", "input_projection.bias"),
+ ("query_embed.weight", "query_position_embeddings.weight"),
+ ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
+ ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
+ ("class_embed.weight", "class_labels_classifier.weight"),
+ ("class_embed.bias", "class_labels_classifier.bias"),
+ ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
+ ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
+ ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
+ ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
+ ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
+ ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
+ ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
+ ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
+ ]
+ )
+
+ return rename_keys
+
+
+def rename_key(state_dict, old, new):
+ val = state_dict.pop(old)
+ state_dict[new] = val
+
+
+def read_in_q_k_v(state_dict, is_panoptic=False):
+ prefix = ""
+ if is_panoptic:
+ prefix = "detr."
+
+ # first: transformer encoder
+ for i in range(6):
+ # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
+ state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
+ state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
+ state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
+ # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
+ for i in range(6):
+ # read in weights + bias of input projection layer of self-attention
+ in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
+ in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
+ state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
+ state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
+ state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
+ # read in weights + bias of input projection layer of cross-attention
+ in_proj_weight_cross_attn = state_dict.pop(
+ f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
+ )
+ in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
+ # next, add query, keys and values (in that order) of cross-attention to the state dict
+ state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
+ state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
+ state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
+ state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
+ state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
+ state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
+
+
+def resize(image, checkpoint_url):
+ width, height = image.size
+ current_max_size = max(width, height)
+ target_max_size = 800 if "detection" in checkpoint_url else 1000
+ scale = target_max_size / current_max_size
+ resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
+
+ return resized_image
+
+
+def normalize(image):
+ image = F.to_tensor(image)
+ image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ return image
+
+
+@torch.no_grad()
+def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
+ """
+ Copy/paste/tweak model's weights to our DETR structure.
+ """
+
+ logger.info("Converting model...")
+
+ # create HuggingFace model and load state dict
+ backbone_config = ResNetConfig.from_pretrained(
+ "microsoft/resnet-18", out_features=["stage1", "stage2", "stage3", "stage4"]
+ )
+
+ config = TableTransformerConfig(
+ backbone_config=backbone_config,
+ use_timm_backbone=False,
+ mask_loss_coefficient=1,
+ dice_loss_coefficient=1,
+ ce_loss_coefficient=1,
+ bbox_loss_coefficient=5,
+ giou_loss_coefficient=2,
+ eos_coefficient=0.4,
+ class_cost=1,
+ bbox_cost=5,
+ giou_cost=2,
+ )
+
+ # load original state dict
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
+
+ # rename keys
+ for src, dest in create_rename_keys(config):
+ rename_key(state_dict, src, dest)
+ # query, key and value matrices need special treatment
+ read_in_q_k_v(state_dict)
+ # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
+ prefix = "model."
+ for key in state_dict.copy().keys():
+ if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
+ val = state_dict.pop(key)
+ state_dict[prefix + key] = val
+
+ if "detection" in checkpoint_url:
+ config.num_queries = 15
+ config.num_labels = 2
+ id2label = {0: "table", 1: "table rotated"}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ else:
+ config.num_queries = 125
+ config.num_labels = 6
+ id2label = {
+ 0: "table",
+ 1: "table column",
+ 2: "table row",
+ 3: "table column header",
+ 4: "table projected row header",
+ 5: "table spanning cell",
+ }
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ image_processor = DetrImageProcessor(format="coco_detection", size={"longest_edge": 800})
+ model = TableTransformerForObjectDetection(config)
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ # verify our conversion
+ filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
+ file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
+ image = Image.open(file_path).convert("RGB")
+ pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
+
+ outputs = model(pixel_values)
+
+ if "detection" in checkpoint_url:
+ expected_shape = (1, 15, 3)
+ expected_logits = torch.tensor(
+ [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
+ )
+ expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
+
+ else:
+ expected_shape = (1, 125, 7)
+ expected_logits = torch.tensor(
+ [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
+ )
+ expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
+
+ assert outputs.logits.shape == expected_shape
+ assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
+ assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
+ print("Looks ok!")
+
+ if pytorch_dump_folder_path is not None:
+ # Save model and image processor
+ logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ model.save_pretrained(pytorch_dump_folder_path)
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ # Push model to HF hub
+ logger.info("Pushing model to the hub...")
+ model_name = (
+ "microsoft/table-transformer-detection"
+ if "detection" in checkpoint_url
+ else "microsoft/table-transformer-structure-recognition"
+ )
+ model.push_to_hub(model_name, revision="no_timm")
+ image_processor.push_to_hub(model_name, revision="no_timm")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
+ type=str,
+ choices=[
+ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
+ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
+ ],
+ help="URL of the Table Transformer checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ )
+ args = parser.parse_args()
+ convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
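+# Example invocation of this script (checkpoint URL taken from the choices above;
+# the dump path is hypothetical):
+#
+#     python convert_table_transformer_to_hf_no_timm.py \
+#         --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
+#         --pytorch_dump_folder_path ./table-transformer-detection-no-timm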
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/modeling_table_transformer.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/modeling_table_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e577a65a5fe0073a196c001f67998c49316dc70
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/table_transformer/modeling_table_transformer.py
@@ -0,0 +1,2000 @@
+# coding=utf-8
+# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Table Transformer model."""
+
+
+import math
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple, Union
+
+import torch
+from torch import Tensor, nn
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_accelerate_available,
+ is_scipy_available,
+ is_timm_available,
+ is_vision_available,
+ logging,
+ replace_return_docstrings,
+ requires_backends,
+)
+from ...utils.backbone_utils import load_backbone
+from .configuration_table_transformer import TableTransformerConfig
+
+
+if is_scipy_available():
+ from scipy.optimize import linear_sum_assignment
+
+if is_timm_available():
+ from timm import create_model
+
+if is_vision_available():
+ from transformers.image_transforms import center_to_corners_format
+
+if is_accelerate_available():
+ from accelerate import PartialState
+ from accelerate.utils import reduce
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "TableTransformerConfig"
+_CHECKPOINT_FOR_DOC = "microsoft/table-transformer-detection"
+
+
+from ..deprecated._archive_maps import TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+# Copied from transformers.models.detr.modeling_detr.DetrDecoderOutput with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
+class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions):
+ """
+    Base class for outputs of the Table Transformer decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions,
+    namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of which
+    has gone through a layernorm. This is useful when training the model with auxiliary decoding losses.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
+ used to compute the weighted average in the cross-attention heads.
+ intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
+            Intermediate decoder activations, i.e. the output of each decoder layer, each of which has gone through a
+            layernorm.
+ """
+
+ intermediate_hidden_states: Optional[torch.FloatTensor] = None
+
+
+@dataclass
+# Copied from transformers.models.detr.modeling_detr.DetrModelOutput with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
+class TableTransformerModelOutput(Seq2SeqModelOutput):
+ """
+    Base class for outputs of the Table Transformer encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput,
+    namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of which
+    has gone through a layernorm. This is useful when training the model with auxiliary decoding losses.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each
+ layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
+ used to compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each
+ layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
+        Intermediate decoder activations, i.e. the output of each decoder layer, each of which has gone through a
+        layernorm.
+ """
+
+ intermediate_hidden_states: Optional[torch.FloatTensor] = None
+
+
+@dataclass
+# Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->TableTransformer,DetrImageProcessor->DetrImageProcessor
+class TableTransformerObjectDetectionOutput(ModelOutput):
+ """
+ Output type of [`TableTransformerForObjectDetection`].
+
+ Args:
+        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
+            Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
+ bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
+ scale-invariant IoU loss.
+ loss_dict (`Dict`, *optional*):
+ A dictionary containing the individual losses. Useful for logging.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
+ Classification logits (including no-object) for all queries.
+ pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
+ Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
+ values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
+            possible padding). You can use [`~DetrImageProcessor.post_process_object_detection`] to retrieve the
+ unnormalized bounding boxes.
+ auxiliary_outputs (`list[Dict]`, *optional*):
+            Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
+ and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
+ `pred_boxes`) for each decoder layer.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each
+ layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
+ used to compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each
+ layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ loss_dict: Optional[Dict] = None
+ logits: torch.FloatTensor = None
+ pred_boxes: torch.FloatTensor = None
+ auxiliary_outputs: Optional[List[Dict]] = None
+ last_hidden_state: Optional[torch.FloatTensor] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->TableTransformer
+class TableTransformerFrozenBatchNorm2d(nn.Module):
+ """
+ BatchNorm2d where the batch statistics and the affine parameters are fixed.
+
+    Copy-paste from torchvision.misc.ops with an added eps before rsqrt, without which models other than
+    torchvision.models.resnet[18,34,50,101] produce NaNs.
+ """
+
+ def __init__(self, n):
+ super().__init__()
+ self.register_buffer("weight", torch.ones(n))
+ self.register_buffer("bias", torch.zeros(n))
+ self.register_buffer("running_mean", torch.zeros(n))
+ self.register_buffer("running_var", torch.ones(n))
+
+ def _load_from_state_dict(
+ self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+ ):
+ num_batches_tracked_key = prefix + "num_batches_tracked"
+ if num_batches_tracked_key in state_dict:
+ del state_dict[num_batches_tracked_key]
+
+ super()._load_from_state_dict(
+ state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
+ )
+
+ def forward(self, x):
+ # move reshapes to the beginning
+ # to make it user-friendly
+ weight = self.weight.reshape(1, -1, 1, 1)
+ bias = self.bias.reshape(1, -1, 1, 1)
+ running_var = self.running_var.reshape(1, -1, 1, 1)
+ running_mean = self.running_mean.reshape(1, -1, 1, 1)
+ epsilon = 1e-5
+ scale = weight * (running_var + epsilon).rsqrt()
+ bias = bias - running_mean * scale
+ return x * scale + bias
+
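+# The forward above folds the usual batch-norm formula
+#     y = (x - running_mean) / sqrt(running_var + eps) * weight + bias
+# into a single affine map y = x * scale + bias', where
+#     scale = weight / sqrt(running_var + eps)
+#     bias' = bias - running_mean * scale
+# so each call is just two broadcasted ops and the statistics stay frozen.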
+
+# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->TableTransformer
+def replace_batch_norm(model):
+ r"""
+ Recursively replace all `torch.nn.BatchNorm2d` with `TableTransformerFrozenBatchNorm2d`.
+
+ Args:
+ model (torch.nn.Module):
+ input model
+ """
+ for name, module in model.named_children():
+ if isinstance(module, nn.BatchNorm2d):
+ new_module = TableTransformerFrozenBatchNorm2d(module.num_features)
+
+            if module.weight.device != torch.device("meta"):
+ new_module.weight.data.copy_(module.weight)
+ new_module.bias.data.copy_(module.bias)
+ new_module.running_mean.data.copy_(module.running_mean)
+ new_module.running_var.data.copy_(module.running_var)
+
+ model._modules[name] = new_module
+
+ if len(list(module.children())) > 0:
+ replace_batch_norm(module)
+
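+# A minimal usage sketch (assumes torchvision is installed; not part of this module):
+#
+#     import torch, torchvision
+#     resnet = torchvision.models.resnet18()
+#     with torch.no_grad():
+#         replace_batch_norm(resnet)  # every nn.BatchNorm2d is now a frozen copy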
+
+# Copied from transformers.models.detr.modeling_detr.DetrConvEncoder with Detr->TableTransformer
+class TableTransformerConvEncoder(nn.Module):
+ """
+ Convolutional backbone, using either the AutoBackbone API or one from the timm library.
+
+ nn.BatchNorm2d layers are replaced by TableTransformerFrozenBatchNorm2d as defined above.
+
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.config = config
+
+ if config.use_timm_backbone:
+ requires_backends(self, ["timm"])
+ kwargs = {}
+ if config.dilation:
+ kwargs["output_stride"] = 16
+ backbone = create_model(
+ config.backbone,
+ pretrained=config.use_pretrained_backbone,
+ features_only=True,
+ out_indices=(1, 2, 3, 4),
+ in_chans=config.num_channels,
+ **kwargs,
+ )
+ else:
+ backbone = load_backbone(config)
+
+ # replace batch norm by frozen batch norm
+ with torch.no_grad():
+ replace_batch_norm(backbone)
+ self.model = backbone
+ self.intermediate_channel_sizes = (
+ self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
+ )
+
+ backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type
+ if "resnet" in backbone_model_type:
+ for name, parameter in self.model.named_parameters():
+ if config.use_timm_backbone:
+ if "layer2" not in name and "layer3" not in name and "layer4" not in name:
+ parameter.requires_grad_(False)
+ else:
+ if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name:
+ parameter.requires_grad_(False)
+
+ def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
+ # send pixel_values through the model to get list of feature maps
+ features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
+
+ out = []
+ for feature_map in features:
+ # downsample pixel_mask to match shape of corresponding feature_map
+ mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
+ out.append((feature_map, mask))
+ return out
+
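+# The interpolation above nearest-downsamples the (batch, height, width) pixel mask
+# to each feature map's resolution. With hypothetical sizes: a (2, 800, 1066) mask
+# paired with a stride-32 feature map of shape (2, 512, 25, 34) yields a boolean
+# mask of shape (2, 25, 34).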
+
+# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->TableTransformer
+class TableTransformerConvModel(nn.Module):
+ """
+ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
+ """
+
+ def __init__(self, conv_encoder, position_embedding):
+ super().__init__()
+ self.conv_encoder = conv_encoder
+ self.position_embedding = position_embedding
+
+ def forward(self, pixel_values, pixel_mask):
+ # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples
+ out = self.conv_encoder(pixel_values, pixel_mask)
+ pos = []
+ for feature_map, mask in out:
+ # position encoding
+ pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))
+
+ return out, pos
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrSinePositionEmbedding with Detr->TableTransformer
+class TableTransformerSinePositionEmbedding(nn.Module):
+ """
+ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
+ need paper, generalized to work on images.
+ """
+
+ def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
+ super().__init__()
+ self.embedding_dim = embedding_dim
+ self.temperature = temperature
+ self.normalize = normalize
+ if scale is not None and normalize is False:
+ raise ValueError("normalize should be True if scale is passed")
+ if scale is None:
+ scale = 2 * math.pi
+ self.scale = scale
+
+ def forward(self, pixel_values, pixel_mask):
+ if pixel_mask is None:
+ raise ValueError("No pixel mask provided")
+ y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
+ x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
+ if self.normalize:
+ y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale
+ x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale
+
+ dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float()
+ dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim)
+
+ pos_x = x_embed[:, :, :, None] / dim_t
+ pos_y = y_embed[:, :, :, None] / dim_t
+ pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+ return pos
+
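+# For position p along either axis and channel pair (2i, 2i + 1), the forward above
+# computes the encoding from "Attention Is All You Need":
+#     PE(p, 2i)     = sin(p / temperature^(2i / embedding_dim))
+#     PE(p, 2i + 1) = cos(p / temperature^(2i / embedding_dim))
+# applied independently to rows (y) and columns (x), then concatenated channel-wise.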
+
+# Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding with Detr->TableTransformer
+class TableTransformerLearnedPositionEmbedding(nn.Module):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, embedding_dim=256):
+ super().__init__()
+ self.row_embeddings = nn.Embedding(50, embedding_dim)
+ self.column_embeddings = nn.Embedding(50, embedding_dim)
+
+ def forward(self, pixel_values, pixel_mask=None):
+ height, width = pixel_values.shape[-2:]
+ width_values = torch.arange(width, device=pixel_values.device)
+ height_values = torch.arange(height, device=pixel_values.device)
+ x_emb = self.column_embeddings(width_values)
+ y_emb = self.row_embeddings(height_values)
+ pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
+ pos = pos.permute(2, 0, 1)
+ pos = pos.unsqueeze(0)
+ pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
+ return pos
+
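+# Shape sketch (hypothetical sizes): with embedding_dim=128 (i.e. d_model // 2, as
+# passed by build_position_encoding below) and pixel_values of shape (2, C, 32, 48),
+# the column and row embeddings (128 channels each) are concatenated and broadcast
+# into a position tensor of shape (2, 256, 32, 48).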
+
+# Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->TableTransformer
+def build_position_encoding(config):
+ n_steps = config.d_model // 2
+ if config.position_embedding_type == "sine":
+ # TODO find a better way of exposing other arguments
+ position_embedding = TableTransformerSinePositionEmbedding(n_steps, normalize=True)
+ elif config.position_embedding_type == "learned":
+ position_embedding = TableTransformerLearnedPositionEmbedding(n_steps)
+ else:
+        raise ValueError(f"Position embedding type {config.position_embedding_type} is not supported")
+
+ return position_embedding
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrAttention with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
+class TableTransformerAttention(nn.Module):
+ """
+ Multi-headed attention from 'Attention Is All You Need' paper.
+
+    Here, we add position embeddings to the queries and keys (as explained in the DETR paper).
+ """
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ bias: bool = True,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ if self.head_dim * num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
+ return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor], **kwargs):
+ position_embeddings = kwargs.pop("position_embeddings", None)
+
+ if kwargs:
+ raise ValueError(f"Unexpected arguments {kwargs.keys()}")
+
+ if position_embeddings is not None and object_queries is not None:
+ raise ValueError(
+ "Cannot specify both position_embeddings and object_queries. Please use just object_queries"
+ )
+
+ if position_embeddings is not None:
+ logger.warning_once(
+ "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead"
+ )
+ object_queries = position_embeddings
+
+ return tensor if object_queries is None else tensor + object_queries
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ object_queries: Optional[torch.Tensor] = None,
+ key_value_states: Optional[torch.Tensor] = None,
+ spatial_position_embeddings: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ **kwargs,
+    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+ """Input shape: Batch x Time x Channel"""
+
+        position_embeddings = kwargs.pop("position_embeddings", None)
+ key_value_position_embeddings = kwargs.pop("key_value_position_embeddings", None)
+
+ if kwargs:
+ raise ValueError(f"Unexpected arguments {kwargs.keys()}")
+
+ if position_embeddings is not None and object_queries is not None:
+ raise ValueError(
+ "Cannot specify both position_embeddings and object_queries. Please use just object_queries"
+ )
+
+ if key_value_position_embeddings is not None and spatial_position_embeddings is not None:
+ raise ValueError(
+ "Cannot specify both key_value_position_embeddings and spatial_position_embeddings. Please use just spatial_position_embeddings"
+ )
+
+ if position_embeddings is not None:
+ logger.warning_once(
+ "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead"
+ )
+ object_queries = position_embeddings
+
+ if key_value_position_embeddings is not None:
+ logger.warning_once(
+ "key_value_position_embeddings has been deprecated and will be removed in v4.34. Please use spatial_position_embeddings instead"
+ )
+ spatial_position_embeddings = key_value_position_embeddings
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ batch_size, target_len, embed_dim = hidden_states.size()
+
+ # add position embeddings to the hidden states before projecting to queries and keys
+ if object_queries is not None:
+ hidden_states_original = hidden_states
+ hidden_states = self.with_pos_embed(hidden_states, object_queries)
+
+ # add key-value position embeddings to the key value states
+ if spatial_position_embeddings is not None:
+ key_value_states_original = key_value_states
+ key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings)
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
+ value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
+ value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
+
+ proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ source_len = key_states.size(1)
+
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
+ raise ValueError(
+ f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (batch_size, 1, target_len, source_len):
+ raise ValueError(
+ f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is"
+ f" {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
+ attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+            # In order to do so, attn_weights has to be reshaped
+            # twice and reused in the following
+ attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
+ attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
+ raise ValueError(
+                f"`attn_output` should be of size {(batch_size * self.num_heads, target_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
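+# Shape walk-through for one TableTransformerAttention forward pass (hypothetical
+# sizes: batch_size=2, embed_dim=256, num_heads=8, head_dim=32, target_len=100,
+# source_len=850):
+#     query_states: (16, 100, 32)    key_states / value_states: (16, 850, 32)
+#     attn_weights = bmm(q, k^T):    (16, 100, 850)
+#     attn_output  = bmm(probs, v):  (16, 100, 32) -> reshaped to (2, 100, 256)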
+
+class TableTransformerEncoderLayer(nn.Module):
+ # Copied from transformers.models.detr.modeling_detr.DetrEncoderLayer.__init__ with Detr->TableTransformer
+ def __init__(self, config: TableTransformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.self_attn = TableTransformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ object_queries: torch.Tensor = None,
+ output_attentions: bool = False,
+ ):
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
+ values.
+ object_queries (`torch.FloatTensor`, *optional*): object queries, to be added to hidden_states.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ object_queries=object_queries,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ hidden_states = residual + hidden_states
+
+ if self.training:
+ if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class TableTransformerDecoderLayer(nn.Module):
+ # Copied from transformers.models.detr.modeling_detr.DetrDecoderLayer.__init__ with Detr->TableTransformer
+ def __init__(self, config: TableTransformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = TableTransformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = TableTransformerAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ object_queries: Optional[torch.Tensor] = None,
+ query_position_embeddings: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ):
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
+ values.
+ object_queries (`torch.FloatTensor`, *optional*):
+ object queries that are added to the queries and keys
+ in the cross-attention layer.
+ query_position_embeddings (`torch.FloatTensor`, *optional*):
+ object queries that are added to the queries and keys
+ in the self-attention layer.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
+ values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ object_queries=query_position_embeddings,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
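+            # in cross-attention, the decoder's query position embeddings are added to the queries,
+            # while the encoder's spatial position embeddings (passed here as `object_queries`) are
+            # added to the keys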
+ hidden_states, cross_attn_weights = self.encoder_attn(
+ hidden_states=hidden_states,
+ object_queries=query_position_embeddings,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ spatial_position_embeddings=object_queries,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ # Fully Connected
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ return outputs
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead with Detr->TableTransformer
+class TableTransformerClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
+ super().__init__()
+ self.dense = nn.Linear(input_dim, inner_dim)
+ self.dropout = nn.Dropout(p=pooler_dropout)
+ self.out_proj = nn.Linear(inner_dim, num_classes)
+
+ def forward(self, hidden_states: torch.Tensor):
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.out_proj(hidden_states)
+ return hidden_states
+
+
+class TableTransformerPreTrainedModel(PreTrainedModel):
+ config_class = TableTransformerConfig
+ base_model_prefix = "model"
+ main_input_name = "pixel_values"
+ _no_split_modules = [
+ r"TableTransformerConvEncoder",
+ r"TableTransformerEncoderLayer",
+ r"TableTransformerDecoderLayer",
+ ]
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+
+ if isinstance(module, TableTransformerLearnedPositionEmbedding):
+ nn.init.uniform_(module.row_embeddings.weight)
+ nn.init.uniform_(module.column_embeddings.weight)
+ if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+TABLE_TRANSFORMER_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`TableTransformerConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TABLE_TRANSFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it.
+
+ Pixel values can be obtained using [`DetrImageProcessor`]. See [`DetrImageProcessor.__call__`] for details.
+
+ pixel_mask (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
+ Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
+
+ - 1 for pixels that are real (i.e. **not masked**),
+ - 0 for pixels that are padding (i.e. **masked**).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
+ Not used by default. Can be used to mask object queries.
+        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`), where
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states
+            at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
+ can choose to directly pass a flattened representation of an image.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
+ Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
+ embedded representation.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class TableTransformerEncoder(TableTransformerPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`TableTransformerEncoderLayer`].
+
+ The encoder updates the flattened feature map through multiple self-attention layers.
+
+ Small tweak for Table Transformer:
+
+ - object_queries are added to the forward pass.
+
+ Args:
+ config: TableTransformerConfig
+ """
+
+ def __init__(self, config: TableTransformerConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+
+ self.layers = nn.ModuleList([TableTransformerEncoderLayer(config) for _ in range(config.encoder_layers)])
+
+ self.layernorm = nn.LayerNorm(config.d_model)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ inputs_embeds=None,
+ attention_mask=None,
+ object_queries=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
+
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
+
+ - 1 for pixel features that are real (i.e. **not masked**),
+ - 0 for pixel features that are padding (i.e. **masked**).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Position embeddings that are added to the queries and keys in each self-attention layer.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ hidden_states = inputs_embeds
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ for encoder_layer in self.layers:
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ # we add object_queries as extra input to the encoder_layer
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ object_queries=object_queries,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ hidden_states = self.layernorm(hidden_states)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrDecoder with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
+class TableTransformerDecoder(TableTransformerPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TableTransformerDecoderLayer`].
+
+ The decoder updates the query embeddings through multiple self-attention and cross-attention layers.
+
+    Some small tweaks for Table Transformer:
+
+ - object_queries and query_position_embeddings are added to the forward pass.
+ - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.
+
+ Args:
+ config: TableTransformerConfig
+ """
+
+ def __init__(self, config: TableTransformerConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+
+ self.layers = nn.ModuleList([TableTransformerDecoderLayer(config) for _ in range(config.decoder_layers)])
+        # in Table Transformer, the decoder uses layernorm after the last decoder layer output
+ self.layernorm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ inputs_embeds=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ object_queries=None,
+ query_position_embeddings=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ **kwargs,
+ ):
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ The query embeddings that are passed into the decoder.
+
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`:
+
+ - 1 for queries that are **not masked**,
+ - 0 for queries that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
+ in `[0, 1]`:
+
+ - 1 for pixels that are real (i.e. **not masked**),
+ - 0 for pixels that are padding (i.e. **masked**).
+
+ object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Object queries that are added to the queries and keys in each cross-attention layer.
+            query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
+                Position embeddings that are added to the queries and keys in each self-attention layer.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ position_embeddings = kwargs.pop("position_embeddings", None)
+
+ if kwargs:
+ raise ValueError(f"Unexpected arguments {kwargs.keys()}")
+
+ if position_embeddings is not None and object_queries is not None:
+ raise ValueError(
+ "Cannot specify both position_embeddings and object_queries. Please use just object_queries"
+ )
+
+ if position_embeddings is not None:
+ logger.warning_once(
+ "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead"
+ )
+ object_queries = position_embeddings
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if inputs_embeds is not None:
+ hidden_states = inputs_embeds
+ input_shape = inputs_embeds.size()[:-1]
+
+ combined_attention_mask = None
+
+        if attention_mask is not None:
+            # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
+            combined_attention_mask = _prepare_4d_attention_mask(
+                attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+            )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # optional intermediate hidden states
+ intermediate = () if self.config.auxiliary_loss else None
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ combined_attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ None,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=combined_attention_mask,
+ object_queries=object_queries,
+ query_position_embeddings=query_position_embeddings,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if self.config.auxiliary_loss:
+ hidden_states = self.layernorm(hidden_states)
+ intermediate += (hidden_states,)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # finally, apply layernorm
+ hidden_states = self.layernorm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ # stack intermediate decoder activations
+ if self.config.auxiliary_loss:
+ intermediate = torch.stack(intermediate)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate]
+ if v is not None
+ )
+ return TableTransformerDecoderOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ intermediate_hidden_states=intermediate,
+ )
+
+
+@add_start_docstrings(
+ """
+ The bare Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) outputting raw
+ hidden-states without any specific head on top.
+ """,
+ TABLE_TRANSFORMER_START_DOCSTRING,
+)
+class TableTransformerModel(TableTransformerPreTrainedModel):
+ # Copied from transformers.models.detr.modeling_detr.DetrModel.__init__ with Detr->TableTransformer
+ def __init__(self, config: TableTransformerConfig):
+ super().__init__(config)
+
+ # Create backbone + positional encoding
+ backbone = TableTransformerConvEncoder(config)
+ object_queries = build_position_encoding(config)
+ self.backbone = TableTransformerConvModel(backbone, object_queries)
+
+ # Create projection layer
+ self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1)
+
+ self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)
+
+ self.encoder = TableTransformerEncoder(config)
+ self.decoder = TableTransformerDecoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def freeze_backbone(self):
+ for name, param in self.backbone.conv_encoder.model.named_parameters():
+ param.requires_grad_(False)
+
+ def unfreeze_backbone(self):
+ for name, param in self.backbone.conv_encoder.model.named_parameters():
+ param.requires_grad_(True)
+
+ @add_start_docstrings_to_model_forward(TABLE_TRANSFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TableTransformerModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ pixel_mask: Optional[torch.FloatTensor] = None,
+ decoder_attention_mask: Optional[torch.FloatTensor] = None,
+ encoder_outputs: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], TableTransformerModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, TableTransformerModel
+ >>> from huggingface_hub import hf_hub_download
+ >>> from PIL import Image
+
+ >>> file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png")
+ >>> image = Image.open(file_path).convert("RGB")
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
+ >>> model = TableTransformerModel.from_pretrained("microsoft/table-transformer-detection")
+
+ >>> # prepare image for the model
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+
+ >>> # forward pass
+ >>> outputs = model(**inputs)
+
+ >>> # the last hidden states are the final query embeddings of the Transformer decoder
+ >>> # these are of shape (batch_size, num_queries, hidden_size)
+ >>> last_hidden_states = outputs.last_hidden_state
+ >>> list(last_hidden_states.shape)
+ [1, 15, 256]
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ batch_size, num_channels, height, width = pixel_values.shape
+ device = pixel_values.device
+
+ if pixel_mask is None:
+            pixel_mask = torch.ones((batch_size, height, width), device=device)
+
+        # First, send pixel_values + pixel_mask through the backbone to obtain the features
+ # pixel_values should be of shape (batch_size, num_channels, height, width)
+ # pixel_mask should be of shape (batch_size, height, width)
+ features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)
+
+ # get final feature map and downsampled mask
+ feature_map, mask = features[-1]
+
+ if mask is None:
+ raise ValueError("Backbone does not return downsampled pixel mask")
+
+ # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
+ projected_feature_map = self.input_projection(feature_map)
+
+ # Third, flatten the feature map + object queries of shape NxCxHxW to NxCxHW, and permute it to NxHWxC
+ # In other words, turn their shape into (batch_size, sequence_length, hidden_size)
+ flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
+ object_queries = position_embeddings_list[-1].flatten(2).permute(0, 2, 1)
+
+ flattened_mask = mask.flatten(1)
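+        # Illustrative shapes (hypothetical numbers): a final backbone feature map of
+        # (batch_size, 2048, 25, 34) is projected to (batch_size, 256, 25, 34) with the default
+        # d_model=256, then flattened to sequence_length = 25 * 34 = 850, i.e. (batch_size, 850, 256)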
+
+        # Fourth, send flattened_features + flattened_mask + object queries through the encoder
+        # flattened_features is a Tensor of shape (batch_size, height*width, hidden_size)
+        # flattened_mask is a Tensor of shape (batch_size, height*width)
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ inputs_embeds=flattened_features,
+ attention_mask=flattened_mask,
+ object_queries=object_queries,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+        # Fifth, send query embeddings + object queries through the decoder (which is conditioned on the encoder output)
+ query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
+ queries = torch.zeros_like(query_position_embeddings)
+
+        # decoder outputs consist of (dec_features, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ inputs_embeds=queries,
+ attention_mask=None,
+ object_queries=object_queries,
+ query_position_embeddings=query_position_embeddings,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=flattened_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return TableTransformerModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """
+ Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on
+ top, for tasks such as COCO detection.
+ """,
+ TABLE_TRANSFORMER_START_DOCSTRING,
+)
+class TableTransformerForObjectDetection(TableTransformerPreTrainedModel):
+ # Copied from transformers.models.detr.modeling_detr.DetrForObjectDetection.__init__ with Detr->TableTransformer
+ def __init__(self, config: TableTransformerConfig):
+ super().__init__(config)
+
+ # DETR encoder-decoder model
+ self.model = TableTransformerModel(config)
+
+ # Object detection heads
+ self.class_labels_classifier = nn.Linear(
+ config.d_model, config.num_labels + 1
+ ) # We add one for the "no object" class
+ self.bbox_predictor = TableTransformerMLPPredictionHead(
+ input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @torch.jit.unused
+ # Copied from transformers.models.detr.modeling_detr.DetrForObjectDetection._set_aux_loss
+ def _set_aux_loss(self, outputs_class, outputs_coord):
+ # this is a workaround to make torchscript happy, as torchscript
+        # doesn't support dictionaries with non-homogeneous values, such
+ # as a dict having both a Tensor and a list.
+ return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
+
+ @add_start_docstrings_to_model_forward(TABLE_TRANSFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TableTransformerObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ pixel_mask: Optional[torch.FloatTensor] = None,
+ decoder_attention_mask: Optional[torch.FloatTensor] = None,
+ encoder_outputs: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[List[Dict]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], TableTransformerObjectDetectionOutput]:
+ r"""
+ labels (`List[Dict]` of len `(batch_size,)`, *optional*):
+ Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
+ following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
+ respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
+ in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> from transformers import AutoImageProcessor, TableTransformerForObjectDetection
+ >>> import torch
+ >>> from PIL import Image
+
+ >>> file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png")
+ >>> image = Image.open(file_path).convert("RGB")
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
+ >>> model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
+ >>> target_sizes = torch.tensor([image.size[::-1]])
+ >>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[
+ ... 0
+ ... ]
+
+ >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+ ... box = [round(i, 2) for i in box.tolist()]
+ ... print(
+ ... f"Detected {model.config.id2label[label.item()]} with confidence "
+ ... f"{round(score.item(), 3)} at location {box}"
+ ... )
+ Detected table with confidence 1.0 at location [202.1, 210.59, 1119.22, 385.09]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # First, send images through the Table Transformer base model to obtain encoder + decoder outputs
+ outputs = self.model(
+ pixel_values,
+ pixel_mask=pixel_mask,
+ decoder_attention_mask=decoder_attention_mask,
+ encoder_outputs=encoder_outputs,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ # class logits + predicted bounding boxes
+ logits = self.class_labels_classifier(sequence_output)
+ pred_boxes = self.bbox_predictor(sequence_output).sigmoid()
+
+ loss, loss_dict, auxiliary_outputs = None, None, None
+ if labels is not None:
+ # First: create the matcher
+ matcher = TableTransformerHungarianMatcher(
+ class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost
+ )
+ # Second: create the criterion
+ losses = ["labels", "boxes", "cardinality"]
+ criterion = TableTransformerLoss(
+ matcher=matcher,
+ num_classes=self.config.num_labels,
+ eos_coef=self.config.eos_coefficient,
+ losses=losses,
+ )
+ criterion.to(self.device)
+ # Third: compute the losses, based on outputs and labels
+ outputs_loss = {}
+ outputs_loss["logits"] = logits
+ outputs_loss["pred_boxes"] = pred_boxes
+ if self.config.auxiliary_loss:
+ intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4]
+ outputs_class = self.class_labels_classifier(intermediate)
+ outputs_coord = self.bbox_predictor(intermediate).sigmoid()
+ auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
+ outputs_loss["auxiliary_outputs"] = auxiliary_outputs
+
+ loss_dict = criterion(outputs_loss, labels)
+ # Fourth: compute total loss, as a weighted sum of the various losses
+ weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient}
+ weight_dict["loss_giou"] = self.config.giou_loss_coefficient
+ if self.config.auxiliary_loss:
+ aux_weight_dict = {}
+ for i in range(self.config.decoder_layers - 1):
+ aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
+ weight_dict.update(aux_weight_dict)
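+                # e.g. with config.decoder_layers = 6, this adds keys like "loss_ce_0", ..., "loss_giou_4"
+                # for the 5 intermediate decoder layers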
+ loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
+
+ if not return_dict:
+ if auxiliary_outputs is not None:
+ output = (logits, pred_boxes) + auxiliary_outputs + outputs
+ else:
+ output = (logits, pred_boxes) + outputs
+ return ((loss, loss_dict) + output) if loss is not None else output
+
+ return TableTransformerObjectDetectionOutput(
+ loss=loss,
+ loss_dict=loss_dict,
+ logits=logits,
+ pred_boxes=pred_boxes,
+ auxiliary_outputs=auxiliary_outputs,
+ last_hidden_state=outputs.last_hidden_state,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+
+# Copied from transformers.models.detr.modeling_detr.dice_loss
+def dice_loss(inputs, targets, num_boxes):
+ """
+ Compute the DICE loss, similar to generalized IOU for masks
+
+ Args:
+ inputs: A float tensor of arbitrary shape.
+ The predictions for each example.
+ targets: A float tensor with the same shape as inputs. Stores the binary
+ classification label for each element in inputs (0 for the negative class and 1 for the positive
+ class).
+ """
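+    # Per predicted mask: dice = 1 - (2 * (inputs * targets).sum() + 1) / (inputs.sum() + targets.sum() + 1);
+    # the +1 smoothing terms keep the loss well-defined for empty masks. The result is averaged over num_boxes.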
+ inputs = inputs.sigmoid()
+ inputs = inputs.flatten(1)
+ numerator = 2 * (inputs * targets).sum(1)
+ denominator = inputs.sum(-1) + targets.sum(-1)
+ loss = 1 - (numerator + 1) / (denominator + 1)
+ return loss.sum() / num_boxes
+
+
+# Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss
+def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
+ """
+ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
+
+ Args:
+ inputs (`torch.FloatTensor` of arbitrary shape):
+ The predictions for each example.
+        targets (`torch.FloatTensor` with the same shape as `inputs`):
+            A tensor storing the binary classification label for each element in `inputs` (0 for the negative class
+            and 1 for the positive class).
+        alpha (`float`, *optional*, defaults to `0.25`):
+            Optional weighting factor in the range (0, 1) to balance positive vs. negative examples.
+        gamma (`float`, *optional*, defaults to `2`):
+            Exponent of the modulating factor (1 - p_t) to balance easy vs. hard examples.
+
+ Returns:
+ Loss tensor
+ """
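+    # Implements FL(p_t) = alpha_t * (1 - p_t)^gamma * CE, where p_t is the predicted probability of the
+    # ground-truth class; the modulating factor (1 - p_t)^gamma down-weights easy examples.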
+ prob = inputs.sigmoid()
+ ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
+ # add modulating factor
+ p_t = prob * targets + (1 - prob) * (1 - targets)
+ loss = ce_loss * ((1 - p_t) ** gamma)
+
+ if alpha >= 0:
+ alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
+ loss = alpha_t * loss
+
+ return loss.mean(1).sum() / num_boxes
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrLoss with Detr->TableTransformer,detr->table_transformer
+class TableTransformerLoss(nn.Module):
+ """
+    This class computes the losses for `TableTransformerForObjectDetection`. The process happens in two steps: 1) we
+    compute the hungarian assignment between the ground truth boxes and the outputs of the model, 2) we supervise each
+    pair of matched ground-truth / prediction (supervise class and box).
+
+    A note on the `num_classes` argument (copied from the original DETR repository): "the naming of the `num_classes`
+    parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id`
+    is the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass
+    `num_classes` to be 91. As another example, for a dataset that has a single class with `id` 1, you should pass
+    `num_classes` to be 2 (`max_obj_id` + 1). For more details on this, check the following discussion
+    https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223"
+
+
+ Args:
+ matcher (`TableTransformerHungarianMatcher`):
+ Module able to compute a matching between targets and proposals.
+ num_classes (`int`):
+ Number of object categories, omitting the special no-object category.
+ eos_coef (`float`):
+ Relative classification weight applied to the no-object category.
+ losses (`List[str]`):
+ List of all the losses to be applied. See `get_loss` for a list of all available losses.
+ """
+
+ def __init__(self, matcher, num_classes, eos_coef, losses):
+ super().__init__()
+ self.matcher = matcher
+ self.num_classes = num_classes
+ self.eos_coef = eos_coef
+ self.losses = losses
+ empty_weight = torch.ones(self.num_classes + 1)
+ empty_weight[-1] = self.eos_coef
+ self.register_buffer("empty_weight", empty_weight)
+
+ # removed logging parameter, which was part of the original implementation
+ def loss_labels(self, outputs, targets, indices, num_boxes):
+ """
+        Classification loss (NLL). Targets dicts must contain the key "class_labels" containing a tensor of dim
+        [nb_target_boxes].
+ """
+ if "logits" not in outputs:
+ raise KeyError("No logits were found in the outputs")
+ source_logits = outputs["logits"]
+
+ idx = self._get_source_permutation_idx(indices)
+ target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
+ target_classes = torch.full(
+ source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
+ )
+ target_classes[idx] = target_classes_o
+
+ loss_ce = nn.functional.cross_entropy(source_logits.transpose(1, 2), target_classes, self.empty_weight)
+ losses = {"loss_ce": loss_ce}
+
+ return losses
+
+ @torch.no_grad()
+ def loss_cardinality(self, outputs, targets, indices, num_boxes):
+ """
+ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.
+
+        This is not really a loss; it is intended for logging purposes only, and it doesn't propagate gradients.
+ """
+ logits = outputs["logits"]
+ device = logits.device
+ target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)
+ # Count the number of predictions that are NOT "no-object" (which is the last class)
+ card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
+ card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
+ losses = {"cardinality_error": card_err}
+ return losses
+
+ def loss_boxes(self, outputs, targets, indices, num_boxes):
+ """
+ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.
+
+ Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes
+ are expected in format (center_x, center_y, w, h), normalized by the image size.
+ """
+ if "pred_boxes" not in outputs:
+ raise KeyError("No predicted boxes found in outputs")
+ idx = self._get_source_permutation_idx(indices)
+ source_boxes = outputs["pred_boxes"][idx]
+ target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
+
+ loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none")
+
+ losses = {}
+ losses["loss_bbox"] = loss_bbox.sum() / num_boxes
+
+ loss_giou = 1 - torch.diag(
+ generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))
+ )
+ losses["loss_giou"] = loss_giou.sum() / num_boxes
+ return losses
+
+ def loss_masks(self, outputs, targets, indices, num_boxes):
+ """
+ Compute the losses related to the masks: the focal loss and the dice loss.
+
+ Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w].
+ """
+ if "pred_masks" not in outputs:
+ raise KeyError("No predicted masks found in outputs")
+
+ source_idx = self._get_source_permutation_idx(indices)
+ target_idx = self._get_target_permutation_idx(indices)
+ source_masks = outputs["pred_masks"]
+ source_masks = source_masks[source_idx]
+ masks = [t["masks"] for t in targets]
+ # TODO use valid to mask invalid areas due to padding in loss
+ target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
+ target_masks = target_masks.to(source_masks)
+ target_masks = target_masks[target_idx]
+
+ # upsample predictions to the target size
+ source_masks = nn.functional.interpolate(
+ source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False
+ )
+ source_masks = source_masks[:, 0].flatten(1)
+
+ target_masks = target_masks.flatten(1)
+ target_masks = target_masks.view(source_masks.shape)
+ losses = {
+ "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes),
+ "loss_dice": dice_loss(source_masks, target_masks, num_boxes),
+ }
+ return losses
+
+ def _get_source_permutation_idx(self, indices):
+ # permute predictions following indices
+ batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)])
+ source_idx = torch.cat([source for (source, _) in indices])
+ return batch_idx, source_idx
+
+ def _get_target_permutation_idx(self, indices):
+ # permute targets following indices
+ batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)])
+ target_idx = torch.cat([target for (_, target) in indices])
+ return batch_idx, target_idx
+
+ def get_loss(self, loss, outputs, targets, indices, num_boxes):
+ loss_map = {
+ "labels": self.loss_labels,
+ "cardinality": self.loss_cardinality,
+ "boxes": self.loss_boxes,
+ "masks": self.loss_masks,
+ }
+ if loss not in loss_map:
+ raise ValueError(f"Loss {loss} not supported")
+ return loss_map[loss](outputs, targets, indices, num_boxes)
+
+ def forward(self, outputs, targets):
+ """
+ This performs the loss computation.
+
+ Args:
+ outputs (`dict`, *optional*):
+ Dictionary of tensors, see the output specification of the model for the format.
+ targets (`List[dict]`, *optional*):
+                List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depend on the
+ losses applied, see each loss' doc.
+ """
+ outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"}
+
+ # Retrieve the matching between the outputs of the last layer and the targets
+ indices = self.matcher(outputs_without_aux, targets)
+
+ # Compute the average number of target boxes across all nodes, for normalization purposes
+ num_boxes = sum(len(t["class_labels"]) for t in targets)
+ num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
+ world_size = 1
+ if is_accelerate_available():
+ if PartialState._shared_state != {}:
+ num_boxes = reduce(num_boxes)
+ world_size = PartialState().num_processes
+ num_boxes = torch.clamp(num_boxes / world_size, min=1).item()
+
+ # Compute all the requested losses
+ losses = {}
+ for loss in self.losses:
+ losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
+
+ # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
+ if "auxiliary_outputs" in outputs:
+ for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
+ indices = self.matcher(auxiliary_outputs, targets)
+ for loss in self.losses:
+ if loss == "masks":
+ # Intermediate masks losses are too costly to compute, we ignore them.
+ continue
+ l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
+ l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
+ losses.update(l_dict)
+
+ return losses
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->TableTransformer,detr->table_transformer
+class TableTransformerMLPPredictionHead(nn.Module):
+ """
+ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
+ height and width of a bounding box w.r.t. an image.
+
+    Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
+    """
+
+ def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
+ super().__init__()
+ self.num_layers = num_layers
+ h = [hidden_dim] * (num_layers - 1)
+ self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+
+ def forward(self, x):
+ for i, layer in enumerate(self.layers):
+ x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+ return x
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrHungarianMatcher with Detr->TableTransformer
+class TableTransformerHungarianMatcher(nn.Module):
+ """
+ This class computes an assignment between the targets and the predictions of the network.
+
+ For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more
+ predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are
+ un-matched (and thus treated as non-objects).
+
+ Args:
+ class_cost:
+ The relative weight of the classification error in the matching cost.
+ bbox_cost:
+ The relative weight of the L1 error of the bounding box coordinates in the matching cost.
+ giou_cost:
+ The relative weight of the giou loss of the bounding box in the matching cost.
+ """
+
+ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1):
+ super().__init__()
+ requires_backends(self, ["scipy"])
+
+ self.class_cost = class_cost
+ self.bbox_cost = bbox_cost
+ self.giou_cost = giou_cost
+ if class_cost == 0 and bbox_cost == 0 and giou_cost == 0:
+ raise ValueError("All costs of the Matcher can't be 0")
+
+ @torch.no_grad()
+ def forward(self, outputs, targets):
+ """
+ Args:
+ outputs (`dict`):
+ A dictionary that contains at least these entries:
+ * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
+ * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates.
+ targets (`List[dict]`):
+ A list of targets (len(targets) = batch_size), where each target is a dict containing:
+                * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of
+                  ground-truth objects in the target) containing the class labels
+ * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.
+
+ Returns:
+ `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where:
+ - index_i is the indices of the selected predictions (in order)
+ - index_j is the indices of the corresponding selected targets (in order)
+ For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
+ """
+ batch_size, num_queries = outputs["logits"].shape[:2]
+
+ # We flatten to compute the cost matrices in a batch
+ out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
+ out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
+
+ # Also concat the target labels and boxes
+ target_ids = torch.cat([v["class_labels"] for v in targets])
+ target_bbox = torch.cat([v["boxes"] for v in targets])
+
+        # Compute the classification cost. Contrary to the loss, we don't use the NLL,
+        # but approximate it by 1 - proba[target class].
+        # The 1 is a constant that doesn't change the matching, so it can be omitted.
+ class_cost = -out_prob[:, target_ids]
+
+ # Compute the L1 cost between boxes
+ bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
+
+ # Compute the giou cost between boxes
+ giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
+
+ # Final cost matrix
+ cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
+ cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
+
+ sizes = [len(v["boxes"]) for v in targets]
+ indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
+ return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
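+
+# A minimal sketch of the assignment step above, with hypothetical numbers: for a cost matrix of
+# 3 queries x 2 targets, scipy's linear_sum_assignment returns the cheapest one-to-one pairing:
+#
+#     import numpy as np
+#     from scipy.optimize import linear_sum_assignment
+#
+#     cost = np.array([[0.9, 0.1], [0.4, 0.8], [0.2, 0.7]])
+#     row_ind, col_ind = linear_sum_assignment(cost)  # (array([0, 2]), array([1, 0]))
+#
+# i.e. query 0 is matched to target 1 and query 2 to target 0, with total cost 0.1 + 0.2 = 0.3.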
+
+
+# Copied from transformers.models.detr.modeling_detr._upcast
+def _upcast(t: Tensor) -> Tensor:
+ # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
+ if t.is_floating_point():
+ return t if t.dtype in (torch.float32, torch.float64) else t.float()
+ else:
+ return t if t.dtype in (torch.int32, torch.int64) else t.int()
+
+
+# Copied from transformers.models.detr.modeling_detr.box_area
+def box_area(boxes: Tensor) -> Tensor:
+ """
+ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.
+
+ Args:
+ boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
+ Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
+ < x2` and `0 <= y1 < y2`.
+
+ Returns:
+ `torch.FloatTensor`: a tensor containing the area for each box.
+ """
+ boxes = _upcast(boxes)
+ return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
+
+
+# Copied from transformers.models.detr.modeling_detr.box_iou
+def box_iou(boxes1, boxes2):
+ area1 = box_area(boxes1)
+ area2 = box_area(boxes2)
+
+ left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
+ right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
+
+ width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2]
+ inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M]
+
+ union = area1[:, None] + area2 - inter
+
+ iou = inter / union
+ return iou, union
+
+
+# Copied from transformers.models.detr.modeling_detr.generalized_box_iou
+def generalized_box_iou(boxes1, boxes2):
+ """
+ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.
+
+ Returns:
+ `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
+ """
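+    # GIoU = IoU - (|C| - |A ∪ B|) / |C|, where C is the smallest enclosing box of both inputs
+    # (computed below as `area`)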
+    # degenerate boxes give inf / nan results, so do an early check
+ if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
+ raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
+ if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
+ raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")
+ iou, union = box_iou(boxes1, boxes2)
+
+ top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
+ bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
+
+ width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2]
+ area = width_height[:, :, 0] * width_height[:, :, 1]
+
+ return iou - (area - union) / area
+
+
+# Copied from transformers.models.detr.modeling_detr._max_by_axis
+def _max_by_axis(the_list):
+ # type: (List[List[int]]) -> List[int]
+ maxes = the_list[0]
+ for sublist in the_list[1:]:
+ for index, item in enumerate(sublist):
+ maxes[index] = max(maxes[index], item)
+ return maxes
+
+
+# Copied from transformers.models.detr.modeling_detr.NestedTensor
+class NestedTensor(object):
+ def __init__(self, tensors, mask: Optional[Tensor]):
+ self.tensors = tensors
+ self.mask = mask
+
+ def to(self, device):
+ cast_tensor = self.tensors.to(device)
+ mask = self.mask
+ if mask is not None:
+ cast_mask = mask.to(device)
+ else:
+ cast_mask = None
+ return NestedTensor(cast_tensor, cast_mask)
+
+ def decompose(self):
+ return self.tensors, self.mask
+
+ def __repr__(self):
+ return str(self.tensors)
+
+
+# Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list
+def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
+ if tensor_list[0].ndim == 3:
+ max_size = _max_by_axis([list(img.shape) for img in tensor_list])
+ batch_shape = [len(tensor_list)] + max_size
+ batch_size, num_channels, height, width = batch_shape
+ dtype = tensor_list[0].dtype
+ device = tensor_list[0].device
+ tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
+ mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
+ for img, pad_img, m in zip(tensor_list, tensor, mask):
+ pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
+ m[: img.shape[1], : img.shape[2]] = False
+ else:
+ raise ValueError("Only 3-dimensional tensors are supported")
+ return NestedTensor(tensor, mask)
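+
+# Usage sketch (hypothetical shapes): batching two images of different sizes pads both to the
+# per-axis maximum, with `mask` True at padded positions:
+#
+#     images = [torch.rand(3, 480, 640), torch.rand(3, 512, 512)]
+#     batched = nested_tensor_from_tensor_list(images)
+#     batched.tensors.shape  # torch.Size([2, 3, 512, 640])
+#     batched.mask.shape     # torch.Size([2, 512, 640])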
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5e1d4568a66a4864af0d991f7ddf05cf5857bd0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__init__.py
@@ -0,0 +1,142 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_xlnet"] = [
+ "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "XLNetForMultipleChoice",
+ "XLNetForQuestionAnswering",
+ "XLNetForQuestionAnsweringSimple",
+ "XLNetForSequenceClassification",
+ "XLNetForTokenClassification",
+ "XLNetLMHeadModel",
+ "XLNetModel",
+ "XLNetPreTrainedModel",
+ "load_tf_weights_in_xlnet",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_xlnet"] = [
+ "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFXLNetForMultipleChoice",
+ "TFXLNetForQuestionAnsweringSimple",
+ "TFXLNetForSequenceClassification",
+ "TFXLNetForTokenClassification",
+ "TFXLNetLMHeadModel",
+ "TFXLNetMainLayer",
+ "TFXLNetModel",
+ "TFXLNetPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_xlnet import XLNetTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_xlnet_fast import XLNetTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_xlnet import (
+ XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ XLNetForMultipleChoice,
+ XLNetForQuestionAnswering,
+ XLNetForQuestionAnsweringSimple,
+ XLNetForSequenceClassification,
+ XLNetForTokenClassification,
+ XLNetLMHeadModel,
+ XLNetModel,
+ XLNetPreTrainedModel,
+ load_tf_weights_in_xlnet,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_xlnet import (
+ TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFXLNetForMultipleChoice,
+ TFXLNetForQuestionAnsweringSimple,
+ TFXLNetForSequenceClassification,
+ TFXLNetForTokenClassification,
+ TFXLNetLMHeadModel,
+ TFXLNetMainLayer,
+ TFXLNetModel,
+ TFXLNetPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
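The file above wires XLNet into the library's lazy-import machinery: importing `transformers.models.xlnet` stays cheap, and torch, TF, or sentencepiece are only imported when one of the listed attributes is first touched. A stripped-down sketch of the idea (simplified; the real `_LazyModule` also handles module specs and extra module attributes):

```python
import importlib
from types import ModuleType


class MiniLazyModule(ModuleType):
    """Minimal sketch of the lazy-import pattern used above (simplified)."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [attribute, ...]} into {attribute: submodule}.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # The heavy backend import happens only here, on first access.
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value
```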
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40cc063e0188526a95ffe52298ff4fff3cf9b4a0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/configuration_xlnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/configuration_xlnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5dee771568c6ae60ea12769100d7ca7960a8a1e8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/configuration_xlnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/convert_xlnet_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/convert_xlnet_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7fd6871b2f0a119689c070c00ef7ea85a76525ff
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/convert_xlnet_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/modeling_tf_xlnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/modeling_tf_xlnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d748cc475eb56e00814030c1f958327448e1a90
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/modeling_tf_xlnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/modeling_xlnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/modeling_xlnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1865536ad3ca58d28e2508be528ae4b6f48c426f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/modeling_xlnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/tokenization_xlnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/tokenization_xlnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ee3b31427a9021c9f7fb9941ee2b8e39a253e09
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/tokenization_xlnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/tokenization_xlnet_fast.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/tokenization_xlnet_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50ea0cd0c6206bcc10a07a75f260ddfdd91f3fea
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/__pycache__/tokenization_xlnet_fast.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/configuration_xlnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/configuration_xlnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..f81c456b61df69163e0bd52d496e889a94e99bad
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/configuration_xlnet.py
@@ -0,0 +1,240 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" XLNet configuration"""
+
+import warnings
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class XLNetConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`XLNetModel`] or a [`TFXLNetModel`]. It is used to
+ instantiate an XLNet model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the
+ [xlnet/xlnet-large-cased](https://huggingface.co/xlnet/xlnet-large-cased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32000):
+ Vocabulary size of the XLNet model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`XLNetModel`] or [`TFXLNetModel`].
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimensionality of the encoder layers and the pooler layer.
+ n_layer (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ n_head (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ d_inner (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ ff_activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ untie_r (`bool`, *optional*, defaults to `True`):
+ Whether or not to untie relative position biases.
+ attn_type (`str`, *optional*, defaults to `"bi"`):
+ The attention type used by the model. Set `"bi"` for XLNet, `"uni"` for Transformer-XL.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ mem_len (`int` or `None`, *optional*):
+ The number of tokens to cache. The key/value pairs that have already been pre-computed in a previous
+ forward pass won't be re-computed. See the
+ [quickstart](https://huggingface.co/transformers/quickstart.html#using-the-past) for more information.
+ reuse_len (`int`, *optional*):
+ The number of tokens in the current batch to be cached and reused in the future.
+ bi_data (`bool`, *optional*, defaults to `False`):
+ Whether or not to use bidirectional input pipeline. Usually set to `True` during pretraining and `False`
+ during finetuning.
+ clamp_len (`int`, *optional*, defaults to -1):
+ Clamp all relative distances larger than clamp_len. Setting this attribute to -1 means no clamping.
+ same_length (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the same attention length for each token.
+ summary_type (`str`, *optional*, defaults to `"last"`):
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
+
+ Has to be one of the following options:
+
+ - `"last"`: Take the last token hidden state (like XLNet).
+ - `"first"`: Take the first token hidden state (like BERT).
+ - `"mean"`: Take the mean of all tokens hidden states.
+ - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
+ - `"attn"`: Not implemented now, use multi-head attention.
+ summary_use_proj (`bool`, *optional*, defaults to `True`):
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
+
+ Whether or not to add a projection after the vector extraction.
+ summary_activation (`str`, *optional*):
+ Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
+
+ Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
+ summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
+ Used in the sequence classification and multiple choice models.
+
+ Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
+ summary_last_dropout (`float`, *optional*, defaults to 0.1):
+ Used in the sequence classification and multiple choice models.
+
+ The dropout ratio to be used after the projection and activation.
+ start_n_top (`int`, *optional*, defaults to 5):
+ Used in the SQuAD evaluation script.
+ end_n_top (`int`, *optional*, defaults to 5):
+ Used in the SQuAD evaluation script.
+ use_mems_eval (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should make use of the recurrent memory mechanism in evaluation mode.
+ use_mems_train (`bool`, *optional*, defaults to `False`):
+ Whether or not the model should make use of the recurrent memory mechanism in train mode.
+
+ For pretraining, it is recommended to set `use_mems_train` to `True`. For fine-tuning, it is recommended to
+ set `use_mems_train` to `False` as discussed
+ [here](https://github.com/zihangdai/xlnet/issues/41#issuecomment-505102587). If `use_mems_train` is set to
+ `True`, one has to make sure that the train batches are correctly pre-processed, *e.g.* `batch_1 = [[This
+ line is], [This is the]]` and `batch_2 = [[ the first line], [ second line]]` and that all batches are of
+ equal size.
+
+ Examples:
+
+ ```python
+ >>> from transformers import XLNetConfig, XLNetModel
+
+ >>> # Initializing a XLNet configuration
+ >>> configuration = XLNetConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = XLNetModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "xlnet"
+ keys_to_ignore_at_inference = ["mems"]
+ attribute_map = {
+ "n_token": "vocab_size", # Backward compatibility
+ "hidden_size": "d_model",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ d_model=1024,
+ n_layer=24,
+ n_head=16,
+ d_inner=4096,
+ ff_activation="gelu",
+ untie_r=True,
+ attn_type="bi",
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ dropout=0.1,
+ mem_len=512,
+ reuse_len=None,
+ use_mems_eval=True,
+ use_mems_train=False,
+ bi_data=False,
+ clamp_len=-1,
+ same_length=False,
+ summary_type="last",
+ summary_use_proj=True,
+ summary_activation="tanh",
+ summary_last_dropout=0.1,
+ start_n_top=5,
+ end_n_top=5,
+ pad_token_id=5,
+ bos_token_id=1,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ """Constructs XLNetConfig."""
+ self.vocab_size = vocab_size
+ self.d_model = d_model
+ self.n_layer = n_layer
+ self.n_head = n_head
+ if d_model % n_head != 0:
+ raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
+ if "d_head" in kwargs:
+ if kwargs["d_head"] != d_model // n_head:
+ raise ValueError(
+ f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
+ )
+ self.d_head = d_model // n_head
+ self.ff_activation = ff_activation
+ self.d_inner = d_inner
+ self.untie_r = untie_r
+ self.attn_type = attn_type
+
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+
+ self.dropout = dropout
+ self.mem_len = mem_len
+ self.reuse_len = reuse_len
+ self.bi_data = bi_data
+ self.clamp_len = clamp_len
+ self.same_length = same_length
+
+ self.summary_type = summary_type
+ self.summary_use_proj = summary_use_proj
+ self.summary_activation = summary_activation
+ self.summary_last_dropout = summary_last_dropout
+ self.start_n_top = start_n_top
+ self.end_n_top = end_n_top
+
+ self.bos_token_id = bos_token_id
+ self.pad_token_id = pad_token_id
+ self.eos_token_id = eos_token_id
+
+ if "use_cache" in kwargs:
+ warnings.warn(
+ "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
+ " instead.",
+ FutureWarning,
+ )
+ use_mems_eval = kwargs["use_cache"]
+
+ self.use_mems_eval = use_mems_eval
+ self.use_mems_train = use_mems_train
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ @property
+ def max_position_embeddings(self):
+ logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
+ return -1
+
+ @max_position_embeddings.setter
+ def max_position_embeddings(self, value):
+ # Message copied from Transformer-XL documentation
+ raise NotImplementedError(
+ f"The model {self.model_type} is one of the few models that has no sequence length limit."
+ )
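One practical consequence of the `use_mems_train` note in the docstring above: training batches must be consecutive chunks of the same streams so that the cached memories line up. A minimal sketch of such a batching helper (hypothetical, not part of the library):

```python
from typing import List


def make_contiguous_batches(streams: List[List[int]], chunk_len: int) -> List[List[List[int]]]:
    """Split parallel token streams into consecutive equal-size chunks so that
    batch t + 1 continues exactly where batch t stopped, which is what
    `use_mems_train=True` expects. Hypothetical helper, not part of transformers."""
    n_chunks = min(len(s) for s in streams) // chunk_len
    return [
        [s[t * chunk_len : (t + 1) * chunk_len] for s in streams]
        for t in range(n_chunks)
    ]


streams = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
batch_1, batch_2 = make_contiguous_batches(streams, chunk_len=3)
# batch_1 == [[1, 2, 3], [7, 8, 9]]; batch_2 == [[4, 5, 6], [10, 11, 12]]
```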
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..804b52b0dc87924fa5ee3eda7aa56e875d075a22
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert BERT checkpoint."""
+
+
+import argparse
+import os
+
+import torch
+
+from transformers import (
+ XLNetConfig,
+ XLNetForQuestionAnswering,
+ XLNetForSequenceClassification,
+ XLNetLMHeadModel,
+ load_tf_weights_in_xlnet,
+)
+from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
+
+
+GLUE_TASKS_NUM_LABELS = {
+ "cola": 2,
+ "mnli": 3,
+ "mrpc": 2,
+ "sst-2": 2,
+ "sts-b": 1,
+ "qqp": 2,
+ "qnli": 2,
+ "rte": 2,
+ "wnli": 2,
+}
+
+
+logging.set_verbosity_info()
+
+
+def convert_xlnet_checkpoint_to_pytorch(
+ tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
+):
+ # Initialise PyTorch model
+ config = XLNetConfig.from_json_file(xlnet_config_file)
+
+ finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
+ if finetuning_task in GLUE_TASKS_NUM_LABELS:
+ print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
+ config.finetuning_task = finetuning_task
+ config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
+ model = XLNetForSequenceClassification(config)
+ elif "squad" in finetuning_task:
+ config.finetuning_task = finetuning_task
+ model = XLNetForQuestionAnswering(config)
+ else:
+ model = XLNetLMHeadModel(config)
+
+ # Load weights from tf checkpoint
+ load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
+
+ # Save pytorch-model
+ pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
+ pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
+ print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
+ print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
+ f.write(config.to_json_string())
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
+ )
+ parser.add_argument(
+ "--xlnet_config_file",
+ default=None,
+ type=str,
+ required=True,
+ help=(
+ "The config json file corresponding to the pre-trained XLNet model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the folder to store the PyTorch model or dataset/vocab.",
+ )
+ parser.add_argument(
+ "--finetuning_task",
+ default=None,
+ type=str,
+ help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
+ )
+ args = parser.parse_args()
+ print(args)
+
+ convert_xlnet_checkpoint_to_pytorch(
+ args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
+ )
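For reference, the converter above can also be driven programmatically. A hedged example (the checkpoint paths are placeholders for a locally downloaded original TF checkpoint; `"sst-2"` picks the two-label sequence classification head via `GLUE_TASKS_NUM_LABELS`):

```python
from transformers.models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
    convert_xlnet_checkpoint_to_pytorch,
)

# Paths below are placeholders; point them at a real original TF checkpoint.
convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path="xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt",
    xlnet_config_file="xlnet_cased_L-24_H-1024_A-16/xlnet_config.json",
    pytorch_dump_folder_path="./xlnet-pytorch",
    finetuning_task="sst-2",  # -> XLNetForSequenceClassification, num_labels=2
)
```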
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/modeling_tf_xlnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/modeling_tf_xlnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..188f5e39a2fba1a6238fbbf019338579cd68b676
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/modeling_tf_xlnet.py
@@ -0,0 +1,1813 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ TF 2.0 XLNet model.
+"""
+
+
+from __future__ import annotations
+
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFSequenceSummary,
+ TFSharedEmbeddings,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_xlnet import XLNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "xlnet/xlnet-base-cased"
+_CONFIG_FOR_DOC = "XLNetConfig"
+
+
+from ..deprecated._archive_maps import TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TFXLNetRelativeAttention(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ if config.d_model % config.n_head != 0:
+ raise ValueError(
+ f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
+ f"heads ({config.n_head})"
+ )
+
+ self.n_head = config.n_head
+ self.d_head = config.d_head
+ self.d_model = config.d_model
+ self.scale = 1 / (config.d_head**0.5)
+ self.initializer_range = config.initializer_range
+ self.output_attentions = config.output_attentions
+
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.config = config
+
+ def build(self, input_shape=None):
+ initializer = get_initializer(self.initializer_range)
+ self.q = self.add_weight(
+ shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="q"
+ )
+ self.k = self.add_weight(
+ shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="k"
+ )
+ self.v = self.add_weight(
+ shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="v"
+ )
+ self.o = self.add_weight(
+ shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="o"
+ )
+ self.r = self.add_weight(
+ shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="r"
+ )
+ self.r_r_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
+ )
+ self.r_s_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_s_bias"
+ )
+ self.r_w_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
+ )
+ self.seg_embed = self.add_weight(
+ shape=(2, self.n_head, self.d_head), initializer=initializer, trainable=True, name="seg_embed"
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def rel_shift(self, x, klen=-1):
+ """perform relative shift to form the relative attention score."""
+ x_size = shape_list(x)
+
+ x = tf.reshape(x, (x_size[1], x_size[0], x_size[2], x_size[3]))
+ x = x[1:, ...]
+ x = tf.reshape(x, (x_size[0], x_size[1] - 1, x_size[2], x_size[3]))
+ x = x[:, 0:klen, :, :]
+ # x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
+
+ return x
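The reshape-then-slice in `rel_shift` is the standard Transformer-XL trick for realigning position-based scores. A 2-D sanity check of the same operation (batch and head axes dropped for readability):

```python
import numpy as np

qlen, klen = 2, 2
rlen = klen + qlen  # length of the relative-position axis of the scores
x = np.arange(qlen * rlen).reshape(qlen, rlen)

# reshape (swap axes without transposing), drop one row, reshape back, crop
shifted = x.reshape(rlen, qlen)[1:].reshape(qlen, rlen - 1)[:, :klen]

for i in range(qlen):
    for j in range(klen):
        # entry (i, j) now holds the score computed for relative offset i - j
        assert shifted[i, j] == x[i, klen - i + j]
```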
+
+ def rel_attn_core(
+ self, q_head, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask, head_mask, output_attentions, training=False
+ ):
+ """Core relative positional attention operations."""
+ # content based attention score
+ ac = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_w_bias, k_head_h)
+
+ # position based attention score
+ bd = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_r_bias, k_head_r)
+ bd = self.rel_shift(bd, klen=shape_list(ac)[1])
+
+ # segment based attention score
+ if seg_mat is None:
+ ef = 0
+ else:
+ ef = tf.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
+ ef = tf.einsum("ijbs,ibns->ijbn", seg_mat, ef)
+
+ # merge attention scores and perform masking
+ attn_score = (ac + bd + ef) * self.scale
+ if attn_mask is not None:
+ # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
+ if attn_mask.dtype == tf.float16 or attn_mask.dtype == tf.bfloat16:
+ attn_score = attn_score - 65500 * attn_mask
+ else:
+ attn_score = attn_score - 1e30 * attn_mask
+
+ # attention probability
+ attn_prob = stable_softmax(attn_score, axis=1)
+
+ attn_prob = self.dropout(attn_prob, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_prob = attn_prob * head_mask
+
+ # attention output
+ attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, v_head_h)
+
+ if output_attentions:
+ return attn_vec, attn_prob
+
+ return attn_vec
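The einsum contractions in `rel_attn_core` keep the sequence axes first (`i` for queries, `j` for keys), with batch `b`, heads `n` and head size `d` trailing. A quick shape check under made-up sizes (the layer itself uses `stable_softmax`; plain softmax is used here for brevity):

```python
import tensorflow as tf

i, j, b, n, d = 3, 5, 2, 4, 8  # query len, key len, batch, heads, head size
q = tf.random.normal([i, b, n, d])
k = tf.random.normal([j, b, n, d])
v = tf.random.normal([j, b, n, d])

score = tf.einsum("ibnd,jbnd->ijbn", q, k)    # -> (3, 5, 2, 4)
attn = tf.nn.softmax(score, axis=1)           # softmax over the key axis j
ctx = tf.einsum("ijbn,jbnd->ibnd", attn, v)   # -> (3, 2, 4, 8)
print(score.shape, ctx.shape)
```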
+
+ def post_attention(self, h, attn_vec, residual=True, training=False):
+ """Post-attention processing."""
+ # post-attention projection (back to `d_model`)
+ attn_out = tf.einsum("ibnd,hnd->ibh", attn_vec, self.o)
+
+ attn_out = self.dropout(attn_out, training=training)
+
+ if residual:
+ attn_out = attn_out + h
+ output = self.layer_norm(attn_out)
+
+ return output
+
+ def call(
+ self,
+ h,
+ g,
+ attn_mask_h,
+ attn_mask_g,
+ r,
+ seg_mat,
+ mems: np.ndarray | tf.Tensor | None = None,
+ target_mapping: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = False,
+ training: bool = False,
+ ):
+ if g is not None:
+ # Two-stream attention with relative positional encoding.
+ # content based attention score
+ if mems is not None and len(shape_list(mems)) > 1:
+ cat = tf.concat([mems, h], axis=0)
+ else:
+ cat = h
+
+ # content-based key head
+ k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k)
+
+ # content-based value head
+ v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v)
+
+ # position-based key head
+ k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r)
+
+ # h-stream
+ # content-stream query head
+ q_head_h = tf.einsum("ibh,hnd->ibnd", h, self.q)
+
+ # core attention ops
+ attn_vec_h = self.rel_attn_core(
+ q_head_h,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat,
+ attn_mask_h,
+ head_mask,
+ output_attentions,
+ training=training,
+ )
+
+ if output_attentions:
+ attn_vec_h, attn_prob_h = attn_vec_h
+
+ # post processing
+ output_h = self.post_attention(h, attn_vec_h, training=training)
+
+ # g-stream
+ # query-stream query head
+ q_head_g = tf.einsum("ibh,hnd->ibnd", g, self.q)
+
+ # core attention ops
+ if target_mapping is not None:
+ q_head_g = tf.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
+ attn_vec_g = self.rel_attn_core(
+ q_head_g,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat,
+ attn_mask_g,
+ head_mask,
+ output_attentions,
+ training=training,
+ )
+
+ if output_attentions:
+ attn_vec_g, attn_prob_g = attn_vec_g
+
+ attn_vec_g = tf.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
+ else:
+ attn_vec_g = self.rel_attn_core(
+ q_head_g,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat,
+ attn_mask_g,
+ head_mask,
+ output_attentions,
+ training=training,
+ )
+
+ if output_attentions:
+ attn_vec_g, attn_prob_g = attn_vec_g
+
+ # post processing
+ output_g = self.post_attention(g, attn_vec_g, training=training)
+
+ if output_attentions:
+ attn_prob = attn_prob_h, attn_prob_g
+
+ else:
+ # Multi-head attention with relative positional encoding
+ if mems is not None and len(shape_list(mems)) > 1:
+ cat = tf.concat([mems, h], axis=0)
+ else:
+ cat = h
+
+ # content heads
+ q_head_h = tf.einsum("ibh,hnd->ibnd", h, self.q)
+ k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k)
+ v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v)
+
+ # positional heads
+ k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r)
+
+ # core attention ops
+ attn_vec = self.rel_attn_core(
+ q_head_h,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat,
+ attn_mask_h,
+ head_mask,
+ output_attentions,
+ training=training,
+ )
+
+ if output_attentions:
+ attn_vec, attn_prob = attn_vec
+
+ # post processing
+ output_h = self.post_attention(h, attn_vec, training=training)
+ output_g = None
+
+ outputs = (output_h, output_g)
+ if output_attentions:
+ outputs = outputs + (attn_prob,)
+ return outputs
+
+
+class TFXLNetFeedForward(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.layer_1 = keras.layers.Dense(
+ config.d_inner, kernel_initializer=get_initializer(config.initializer_range), name="layer_1"
+ )
+ self.layer_2 = keras.layers.Dense(
+ config.d_model, kernel_initializer=get_initializer(config.initializer_range), name="layer_2"
+ )
+ self.dropout = keras.layers.Dropout(config.dropout)
+ if isinstance(config.ff_activation, str):
+ self.activation_function = get_tf_activation(config.ff_activation)
+ else:
+ self.activation_function = config.ff_activation
+ self.config = config
+
+ def call(self, inp, training=False):
+ output = inp
+ output = self.layer_1(output)
+ output = self.activation_function(output)
+ output = self.dropout(output, training=training)
+ output = self.layer_2(output)
+ output = self.dropout(output, training=training)
+ output = self.layer_norm(output + inp)
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layer_1", None) is not None:
+ with tf.name_scope(self.layer_1.name):
+ self.layer_1.build([None, None, self.config.d_model])
+ if getattr(self, "layer_2", None) is not None:
+ with tf.name_scope(self.layer_2.name):
+ self.layer_2.build([None, None, self.config.d_inner])
+
+
+class TFXLNetLayer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.rel_attn = TFXLNetRelativeAttention(config, name="rel_attn")
+ self.ff = TFXLNetFeedForward(config, name="ff")
+ self.dropout = keras.layers.Dropout(config.dropout)
+
+ def call(
+ self,
+ output_h,
+ output_g,
+ non_tgt_mask,
+ attn_mask,
+ pos_emb,
+ seg_mat,
+ mems: np.ndarray | tf.Tensor | None = None,
+ target_mapping: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = False,
+ training: bool = False,
+ ):
+ outputs = self.rel_attn(
+ output_h,
+ output_g,
+ non_tgt_mask,
+ attn_mask,
+ pos_emb,
+ seg_mat,
+ mems,
+ target_mapping,
+ head_mask,
+ output_attentions,
+ training=training,
+ )
+ output_h, output_g = outputs[:2]
+
+ if output_g is not None:
+ output_g = self.ff(output_g, training=training)
+ output_h = self.ff(output_h, training=training)
+
+ outputs = (output_h, output_g) + outputs[2:]  # Add the attentions again if they are there
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "rel_attn", None) is not None:
+ with tf.name_scope(self.rel_attn.name):
+ self.rel_attn.build(None)
+ if getattr(self, "ff", None) is not None:
+ with tf.name_scope(self.ff.name):
+ self.ff.build(None)
+
+
+class TFXLNetLMHead(keras.layers.Layer):
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.input_embeddings = input_embeddings
+
+ def build(self, input_shape):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+ super().build(input_shape)
+
+ def get_output_embeddings(self):
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
+ hidden_states = self.input_embeddings(hidden_states, mode="linear")
+ hidden_states = hidden_states + self.bias
+ return hidden_states
+
+
+@keras_serializable
+class TFXLNetMainLayer(keras.layers.Layer):
+ config_class = XLNetConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+ self.return_dict = config.return_dict
+
+ self.mem_len = config.mem_len
+ self.reuse_len = config.reuse_len
+ self.d_model = config.d_model
+ self.same_length = config.same_length
+ self.attn_type = config.attn_type
+ self.bi_data = config.bi_data
+ self.clamp_len = config.clamp_len
+ self.n_layer = config.n_layer
+ self.use_bfloat16 = config.use_bfloat16
+ self.initializer_range = config.initializer_range
+
+ self.word_embedding = TFSharedEmbeddings(
+ config.vocab_size, config.d_model, initializer_range=config.initializer_range, name="word_embedding"
+ )
+ self.layer = [TFXLNetLayer(config, name=f"layer_._{i}") for i in range(config.n_layer)]
+ self.dropout = keras.layers.Dropout(config.dropout)
+
+ self.use_mems_eval = config.use_mems_eval
+ self.use_mems_train = config.use_mems_train
+
+ def get_input_embeddings(self):
+ return self.word_embedding
+
+ def set_input_embeddings(self, value):
+ self.word_embedding.weight = value
+ self.word_embedding.vocab_size = shape_list(value)[0]
+
+ def build(self, input_shape=None):
+ initializer = get_initializer(self.initializer_range)
+ self.mask_emb = self.add_weight(
+ shape=(1, 1, self.d_model), initializer=initializer, trainable=True, name="mask_emb"
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "word_embedding", None) is not None:
+ with tf.name_scope(self.word_embedding.name):
+ self.word_embedding.build(None)
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+ def _prune_heads(self, heads_to_prune):
+ raise NotImplementedError
+
+ def create_mask(self, qlen, mlen):
+ """
+ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
+
+ Args:
+ qlen: length of the current segment (number of query tokens).
+ mlen: length of the cached memory prepended to the keys.
+
+ ```
+
+ same_length=False: same_length=True:
+ < qlen > < qlen >
+ ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]
+ [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]
+ qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]
+ [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]
+ v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]
+ ```
+ """
+ attn_mask = tf.ones([qlen, qlen])
+ mask_u = tf.linalg.band_part(attn_mask, 0, -1)
+ mask_dia = tf.linalg.band_part(attn_mask, 0, 0)
+ attn_mask_pad = tf.zeros([qlen, mlen])
+ ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)
+ if self.same_length:
+ mask_l = tf.linalg.band_part(attn_mask, -1, 0)
+ ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)
+ return ret
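A quick numeric check of the `band_part` construction above (without the `same_length` branch), matching the left diagram in the docstring:

```python
import tensorflow as tf

qlen, mlen = 3, 2
ones = tf.ones([qlen, qlen])
mask_u = tf.linalg.band_part(ones, 0, -1)   # upper triangle incl. diagonal
mask_dia = tf.linalg.band_part(ones, 0, 0)  # diagonal only
mask = tf.concat([tf.zeros([qlen, mlen]), mask_u - mask_dia], axis=1)
print(mask.numpy())
# [[0. 0. 0. 1. 1.]
#  [0. 0. 0. 0. 1.]
#  [0. 0. 0. 0. 0.]]   # 1.0 = masked: memory and past visible, future not
```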
+
+ def cache_mem(self, curr_out, prev_mem):
+ # cache hidden states into memory.
+ if self.reuse_len is not None and self.reuse_len > 0:
+ curr_out = curr_out[: self.reuse_len]
+
+ if self.mem_len is None or self.mem_len == 0:
+ # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time
+ # and returns all of the past and current hidden states.
+ cutoff = 0
+ else:
+ # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden
+ # states. This is the preferred setting for training and long-form generation.
+ cutoff = -self.mem_len
+ if prev_mem is None:
+ # no previous memory: the new memory is just the (possibly truncated) current output
+ new_mem = curr_out[cutoff:]
+ else:
+ new_mem = tf.concat([prev_mem, curr_out], 0)[cutoff:]
+
+ return tf.stop_gradient(new_mem)
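`cache_mem` keeps a sliding window of hidden states. A toy view with `mem_len=3` (shapes follow the `[len, bsz, d_model]` convention used throughout this file):

```python
import tensorflow as tf

mem_len = 3
prev_mem = tf.reshape(tf.range(2.0), [2, 1, 1])         # [mlen, bsz, d_model]
curr_out = tf.reshape(tf.range(4.0) + 2.0, [4, 1, 1])   # [qlen, bsz, d_model]

new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:]
print(tf.squeeze(new_mem).numpy())  # [3. 4. 5.] -- the last `mem_len` states
```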
+
+ @staticmethod
+ def positional_embedding(pos_seq, inv_freq, bsz=None):
+ sinusoid_inp = tf.einsum("i,d->id", pos_seq, inv_freq)
+ pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], axis=-1)
+ pos_emb = pos_emb[:, None, :]
+
+ if bsz is not None:
+ pos_emb = tf.tile(pos_emb, [1, bsz, 1])
+
+ return pos_emb
+
+ def relative_positional_encoding(self, qlen, klen, bsz=None):
+ """create relative positional encoding."""
+ freq_seq = tf.range(0, self.d_model, 2.0)
+ inv_freq = 1 / (10000 ** (freq_seq / self.d_model))
+
+ if self.attn_type == "bi":
+ # beg, end = klen - 1, -qlen
+ beg, end = klen, -qlen
+ elif self.attn_type == "uni":
+ # beg, end = klen - 1, -1
+ beg, end = klen, -1
+ else:
+ raise ValueError(f"Unknown `attn_type` {self.attn_type}.")
+
+ if self.bi_data:
+ fwd_pos_seq = tf.range(beg, end, -1.0)
+ bwd_pos_seq = tf.range(-beg, -end, 1.0)
+
+ if self.clamp_len > 0:
+ fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len)
+ bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -self.clamp_len, self.clamp_len)
+
+ if bsz is not None:
+ if bsz % 2 != 0:
+ raise ValueError(f"With bi_data, the batch size {bsz} should be divisible by 2")
+ fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
+ bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
+ else:
+ fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
+ bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
+
+ pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1)
+ else:
+ fwd_pos_seq = tf.range(beg, end, -1.0)
+ if self.clamp_len > 0:
+ fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len)
+ pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
+
+ return pos_emb
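For the bidirectional (`"bi"`) case above, the relative offsets run from `klen` down to `-qlen + 1`, one sinusoidal embedding per offset. A small check (no `bi_data`):

```python
import tensorflow as tf

qlen, klen = 2, 5
beg, end = klen, -qlen                        # as in the "bi" branch above
fwd_pos_seq = tf.range(float(beg), float(end), -1.0)
print(fwd_pos_seq.numpy())  # [ 5.  4.  3.  2.  1.  0. -1.] -- klen + qlen offsets
```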
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ mems: np.ndarray | tf.Tensor | None = None,
+ perm_mask: np.ndarray | tf.Tensor | None = None,
+ target_mapping: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ input_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ):
+ # respect an explicitly passed `use_mems`; otherwise fall back to the config default
+ if use_mems is None:
+ use_mems = self.use_mems_train if training else self.use_mems_eval
+
+ # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
+ # but we want a unified interface in the library with the batch size on the first dimension
+ # so we move here the first dimension (batch) to the end
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_ids = tf.transpose(input_ids, perm=(1, 0))
+ qlen, bsz = shape_list(input_ids)[:2]
+ elif inputs_embeds is not None:
+ inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))
+ qlen, bsz = shape_list(inputs_embeds)[:2]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ token_type_ids = tf.transpose(token_type_ids, perm=(1, 0)) if token_type_ids is not None else None
+ input_mask = tf.transpose(input_mask, perm=(1, 0)) if input_mask is not None else None
+ attention_mask = tf.transpose(attention_mask, perm=(1, 0)) if attention_mask is not None else None
+ perm_mask = tf.transpose(perm_mask, perm=(1, 2, 0)) if perm_mask is not None else None
+ target_mapping = tf.transpose(target_mapping, perm=(1, 2, 0)) if target_mapping is not None else None
+
+ mlen = shape_list(mems[0])[0] if mems is not None and mems[0] is not None else 0
+ klen = mlen + qlen
+
+ # Attention mask
+ # causal attention mask
+ if self.attn_type == "uni":
+ attn_mask = self.create_mask(qlen, mlen)
+ attn_mask = attn_mask[:, :, None, None]
+ elif self.attn_type == "bi":
+ attn_mask = None
+ else:
+ raise ValueError(f"Unsupported attention type: {self.attn_type}")
+
+ # data mask: input mask & perm mask
+ if input_mask is not None and attention_mask is not None:
+ raise ValueError(
+ "You can only use one of input_mask (uses 1 for padding) "
+ "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
+ )
+ if input_mask is None and attention_mask is not None:
+ one_cst = tf.constant(1.0)
+ input_mask = 1.0 - tf.cast(attention_mask, dtype=one_cst.dtype)
+ if input_mask is not None and perm_mask is not None:
+ data_mask = input_mask[None] + perm_mask
+ elif input_mask is not None and perm_mask is None:
+ data_mask = input_mask[None]
+ elif input_mask is None and perm_mask is not None:
+ data_mask = perm_mask
+ else:
+ data_mask = None
+
+ if data_mask is not None:
+ # all mems can be attended to
+ if mlen > 0:
+ mems_mask = tf.zeros([shape_list(data_mask)[0], mlen, bsz])
+ data_mask = tf.concat([mems_mask, data_mask], axis=1)
+ if attn_mask is None:
+ attn_mask = data_mask[:, :, :, None]
+ else:
+ attn_mask += data_mask[:, :, :, None]
+
+ if attn_mask is not None:
+ attn_mask = tf.cast(attn_mask > 0, dtype=attn_mask.dtype)
+
+ if attn_mask is not None:
+ non_tgt_mask = -tf.eye(qlen)
+ if mlen > 0:
+ non_tgt_mask = tf.concat([tf.zeros([qlen, mlen]), non_tgt_mask], axis=-1)
+ non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, dtype=non_tgt_mask.dtype)
+ else:
+ non_tgt_mask = None
+
+ # Word embeddings and prepare h & g hidden states
+ if inputs_embeds is not None:
+ word_emb_k = inputs_embeds
+ else:
+ check_embeddings_within_bounds(input_ids, self.word_embedding.vocab_size)
+ word_emb_k = self.word_embedding(input_ids)
+ output_h = self.dropout(word_emb_k, training=training)
+ if target_mapping is not None:
+ word_emb_q = tf.tile(self.mask_emb, [shape_list(target_mapping)[0], bsz, 1])
+ # else: # We removed the inp_q input which was same as target mapping
+ # inp_q_ext = inp_q[:, :, None]
+ # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
+ output_g = self.dropout(word_emb_q, training=training)
+ else:
+ output_g = None
+
+ # Segment embedding
+ if token_type_ids is not None:
+ # Convert `token_type_ids` to one-hot `seg_mat`
+ if mlen > 0:
+ mem_pad = tf.zeros([mlen, bsz], dtype=token_type_ids.dtype)
+ cat_ids = tf.concat([mem_pad, token_type_ids], 0)
+ else:
+ cat_ids = token_type_ids
+
+ # `1` indicates not in the same segment [qlen x klen x bsz]
+ seg_mat = tf.cast(
+ tf.logical_not(tf.equal(token_type_ids[:, None], cat_ids[None, :])),
+ dtype=token_type_ids.dtype,
+ )
+ seg_mat = tf.one_hot(seg_mat, 2)
+ else:
+ seg_mat = None
+
+ # Positional encoding
+ pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
+ pos_emb = self.dropout(pos_emb, training=training)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.n_layer
+
+ new_mems = ()
+ if mems is None:
+ mems = [None] * len(self.layer)
+
+ attentions = [] if output_attentions else None
+ hidden_states = [] if output_hidden_states else None
+ for i, layer_module in enumerate(self.layer):
+ # cache new mems
+ if use_mems:
+ new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
+ if output_hidden_states:
+ hidden_states.append((output_h, output_g) if output_g is not None else output_h)
+
+ outputs = layer_module(
+ output_h,
+ output_g,
+ non_tgt_mask,
+ attn_mask,
+ pos_emb,
+ seg_mat,
+ mems[i],
+ target_mapping,
+ head_mask[i],
+ output_attentions,
+ training=training,
+ )
+ output_h, output_g = outputs[:2]
+ if output_attentions:
+ attentions.append(outputs[2])
+
+ # Add last hidden state
+ if output_hidden_states:
+ hidden_states.append((output_h, output_g) if output_g is not None else output_h)
+
+ output = self.dropout(output_g if output_g is not None else output_h, training=training)
+
+ # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of the call() method)
+ output = tf.transpose(output, perm=(1, 0, 2))
+
+ if not use_mems:
+ new_mems = None
+ if output_hidden_states:
+ if output_g is not None:
+ hidden_states = tuple(tf.transpose(h, perm=(1, 0, 2)) for hs in hidden_states for h in hs)
+ else:
+ hidden_states = tuple(tf.transpose(hs, perm=(1, 0, 2)) for hs in hidden_states)
+ if output_attentions:
+ if target_mapping is not None:
+ # when target_mapping is provided, there are 2-tuple of attentions
+ attentions = tuple(
+ tuple(tf.transpose(attn_stream, perm=(2, 3, 0, 1)) for attn_stream in t) for t in attentions
+ )
+ else:
+ attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)
+
+ if not return_dict:
+ return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None)
+
+ return TFXLNetModelOutput(
+ last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions
+ )
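An end-to-end sketch of this call path through the public wrapper class defined later in the file (assumes the `xlnet/xlnet-base-cased` checkpoint can be downloaded):

```python
from transformers import AutoTokenizer, TFXLNetModel

tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
model = TFXLNetModel.from_pretrained("xlnet/xlnet-base-cased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(**inputs, use_mems=True)

print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)
print(len(outputs.mems))                # one cached tensor per layer (12)
```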
+
+
+class TFXLNetPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = XLNetConfig
+ base_model_prefix = "transformer"
+
+
+@dataclass
+class TFXLNetModelOutput(ModelOutput):
+ """
+ Output type of [`TFXLNetModel`].
+
+ Args:
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, num_predict, hidden_size)`):
+ Sequence of hidden-states at the last layer of the model.
+
+ `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
+ corresponds to `sequence_length`.
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: tf.Tensor = None
+ mems: List[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFXLNetLMHeadModelOutput(ModelOutput):
+ """
+ Output type of [`TFXLNetLMHeadModel`].
+
+ Args:
+ loss (`tf.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`tf.Tensor` of shape `(batch_size, num_predict, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+
+ `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
+ corresponds to `sequence_length`.
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ mems: List[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFXLNetForSequenceClassificationOutput(ModelOutput):
+ """
+ Output type of [`TFXLNetForSequenceClassification`].
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ mems: List[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFXLNetForTokenClassificationOutput(ModelOutput):
+ """
+ Output type of [`TFXLNetForTokenClassification`].
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ mems: List[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFXLNetForMultipleChoiceOutput(ModelOutput):
+ """
+ Output type of [`TFXLNetForMultipleChoice`].
+
+ Args:
+ loss (`tf.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`tf.Tensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ mems: List[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFXLNetForQuestionAnsweringSimpleOutput(ModelOutput):
+ """
+ Output type of [`TFXLNetForQuestionAnsweringSimple`].
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ start_logits: tf.Tensor = None
+ end_logits: tf.Tensor = None
+ mems: List[tf.Tensor] | None = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+XLNET_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
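+ A minimal sketch of the three equivalent call styles (hypothetical tensors, assuming a built
+ `model`):
+
+ ```python
+ outputs = model(input_ids)  # a single tensor
+ outputs = model([input_ids, attention_mask])  # a list, in docstring order
+ outputs = model({"input_ids": input_ids, "attention_mask": attention_mask})  # a dict
+ ```
+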
+
+
+ Parameters:
+ config ([`XLNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+XLNET_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (see `mems` output below). Can be used to speed up sequential
+ decoding. The token ids which have their past given to this model should not be passed as `input_ids` as
+ they have already been computed.
+
+ `use_mems` has to be set to `True` to make use of `mems`.
+ perm_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
+ Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:
+
+ - if `perm_mask[k, i, j] = 0`, i attends to j in batch k;
+ - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.
+
+ If not set, each token attends to all the others (full bidirectional attention). Only used during
+ pretraining (to define factorization order) or for sequential decoding (generation).
+ target_mapping (`np.ndarray` or `tf.Tensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
+ Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th prediction in batch
+ k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
+ (generation).
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ input_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for
+ real tokens and 1 for padding which is kept for compatibility with the original code base.
+
+ Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **masked**,
+ - 0 for tokens that are **not masked**.
+
+ You can only use one of `input_mask` and `attention_mask`.
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
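+
+ As a minimal sketch (hypothetical `batch_size` and `seq_len`), a `perm_mask` that hides the
+ last token from every position, together with a `target_mapping` that predicts only that
+ token, would look like:
+
+ ```python
+ perm_mask = np.zeros((batch_size, seq_len, seq_len))
+ perm_mask[:, :, -1] = 1.0  # perm_mask[k, i, j] = 1: i does not attend to j
+ target_mapping = np.zeros((batch_size, 1, seq_len))
+ target_mapping[:, 0, -1] = 1.0  # the single prediction targets the last token
+ ```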
+"""
+
+
+@add_start_docstrings(
+ "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
+ XLNET_START_DOCSTRING,
+)
+class TFXLNetModel(TFXLNetPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFXLNetMainLayer(config, name="transformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFXLNetModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ mems: np.ndarray | tf.Tensor | None = None,
+ perm_mask: np.ndarray | tf.Tensor | None = None,
+ target_mapping: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ input_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFXLNetModelOutput, Tuple[tf.Tensor]]:
+ outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings).
+ """,
+ XLNET_START_DOCSTRING,
+)
+class TFXLNetLMHeadModel(TFXLNetPreTrainedModel, TFCausalLanguageModelingLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFXLNetMainLayer(config, name="transformer")
+ self.lm_loss = TFXLNetLMHead(config, self.transformer.word_embedding, name="lm_loss")
+ # generate fails to convert to a graph with XLNet
+ self.supports_xla_generation = False
+
+ def get_lm_head(self):
+ return self.lm_loss
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.lm_loss.name
+
+ def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_mems=None, **kwargs):
+ # Add dummy token at the end (no attention on this one)
+ effective_batch_size = inputs.shape[0]
+ dummy_token = tf.zeros((effective_batch_size, 1), dtype=inputs.dtype)
+
+ # At every pass, the attention values for the new token and the two last generated tokens
+ # are computed; the rest is reloaded from the `past` cache. A purely auto-regressive model
+ # would use offset = 1; offset = 2 seems to give slightly better results.
+ offset = 2
+
+ if past_key_values:
+ input_ids = tf.concat([inputs[:, -offset:], dummy_token], axis=1)
+ else:
+ input_ids = tf.concat([inputs, dummy_token], axis=1)
+
+ # Build permutation mask so that previous tokens don't see last token
+ sequence_length = input_ids.shape[1]
+ perm_mask = tf.zeros((effective_batch_size, sequence_length, sequence_length - 1))
+ perm_mask_seq_end = tf.ones((effective_batch_size, sequence_length, 1))
+ perm_mask = tf.concat([perm_mask, perm_mask_seq_end], axis=-1)
+
+ # We'll only predict the last token
+ target_mapping = tf.zeros((effective_batch_size, 1, sequence_length - 1))
+ target_mapping_seq_end = tf.ones((effective_batch_size, 1, 1))
+ target_mapping = tf.concat([target_mapping, target_mapping_seq_end], axis=-1)
+
+ inputs = {
+ "input_ids": input_ids,
+ "perm_mask": perm_mask,
+ "target_mapping": target_mapping,
+ "use_mems": use_mems,
+ }
+
+ # if past is defined in model kwargs then use it for faster decoding
+ if past_key_values:
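+ # drop the last `offset` positions from each memory: those tokens are fed in again above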
+ inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values)
+
+ return inputs
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFXLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ mems: np.ndarray | tf.Tensor | None = None,
+ perm_mask: np.ndarray | tf.Tensor | None = None,
+ target_mapping: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ input_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFXLNetLMHeadModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
+ config.vocab_size - 1]`.
+
+ Return:
+
+ Examples:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> import numpy as np
+ >>> from transformers import AutoTokenizer, TFXLNetLMHeadModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
+ >>> model = TFXLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")
+
+ >>> # We show how to setup inputs to predict a next token using a bi-directional context.
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=True))[
+ ... None, :
+ ... ] # We will predict the masked token
+
+ >>> perm_mask = np.zeros((1, input_ids.shape[1], input_ids.shape[1]))
+ >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
+
+ >>> target_mapping = np.zeros(
+ ... (1, 1, input_ids.shape[1])
+ ... ) # Shape [1, 1, seq_length] => let's predict one token
+ >>> target_mapping[
+ ... 0, 0, -1
+ ... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
+
+ >>> outputs = model(
+ ... input_ids,
+ ... perm_mask=tf.constant(perm_mask, dtype=tf.float32),
+ ... target_mapping=tf.constant(target_mapping, dtype=tf.float32),
+ ... )
+
+ >>> next_token_logits = outputs[
+ ... 0
+ ... ] # Output has shape [target_mapping.shape[0], target_mapping.shape[1], config.vocab_size]
+ ```"""
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_state = transformer_outputs[0]
+ logits = self.lm_loss(hidden_state, training=training)
+
+ loss = None
+ if labels is not None:
+ loss = self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFXLNetLMHeadModelOutput(
+ loss=loss,
+ logits=logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "lm_loss", None) is not None:
+ with tf.name_scope(self.lm_loss.name):
+ self.lm_loss.build(None)
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g.
+ for GLUE tasks.
+ """,
+ XLNET_START_DOCSTRING,
+)
+class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.transformer = TFXLNetMainLayer(config, name="transformer")
+ self.sequence_summary = TFSequenceSummary(
+ config, initializer_range=config.initializer_range, name="sequence_summary"
+ )
+ self.logits_proj = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFXLNetForSequenceClassificationOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ mems: np.ndarray | tf.Tensor | None = None,
+ perm_mask: np.ndarray | tf.Tensor | None = None,
+ target_mapping: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ input_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFXLNetForSequenceClassificationOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ output = transformer_outputs[0]
+
+ output = self.sequence_summary(output)
+ logits = self.logits_proj(output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFXLNetForSequenceClassificationOutput(
+ loss=loss,
+ logits=logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "sequence_summary", None) is not None:
+ with tf.name_scope(self.sequence_summary.name):
+ self.sequence_summary.build(None)
+ if getattr(self, "logits_proj", None) is not None:
+ with tf.name_scope(self.logits_proj.name):
+ self.logits_proj.build([None, None, self.config.d_model])
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ XLNET_START_DOCSTRING,
+)
+class TFXLNetForMultipleChoice(TFXLNetPreTrainedModel, TFMultipleChoiceLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.transformer = TFXLNetMainLayer(config, name="transformer")
+ self.sequence_summary = TFSequenceSummary(
+ config, initializer_range=config.initializer_range, name="sequence_summary"
+ )
+ self.logits_proj = keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFXLNetForMultipleChoiceOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ input_mask: np.ndarray | tf.Tensor | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ mems: np.ndarray | tf.Tensor | None = None,
+ perm_mask: np.ndarray | tf.Tensor | None = None,
+ target_mapping: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFXLNetForMultipleChoiceOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
+ """
+
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
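+ # flatten (batch_size, num_choices, seq_length) inputs to (batch_size * num_choices,
+ # seq_length) so that every choice is encoded by the transformer independently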
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
+ flat_input_mask = tf.reshape(input_mask, (-1, seq_length)) if input_mask is not None else None
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+ transformer_outputs = self.transformer(
+ flat_input_ids,
+ flat_attention_mask,
+ mems,
+ perm_mask,
+ target_mapping,
+ flat_token_type_ids,
+ flat_input_mask,
+ head_mask,
+ flat_inputs_embeds,
+ use_mems,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ output = transformer_outputs[0]
+ logits = self.sequence_summary(output)
+ logits = self.logits_proj(logits)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFXLNetForMultipleChoiceOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "sequence_summary", None) is not None:
+ with tf.name_scope(self.sequence_summary.name):
+ self.sequence_summary.build(None)
+ if getattr(self, "logits_proj", None) is not None:
+ with tf.name_scope(self.logits_proj.name):
+ self.logits_proj.build([None, None, self.config.d_model])
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ XLNET_START_DOCSTRING,
+)
+class TFXLNetForTokenClassification(TFXLNetPreTrainedModel, TFTokenClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.transformer = TFXLNetMainLayer(config, name="transformer")
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFXLNetForTokenClassificationOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ mems: np.ndarray | tf.Tensor | None = None,
+ perm_mask: np.ndarray | tf.Tensor | None = None,
+ target_mapping: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ input_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFXLNetForTokenClassificationOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ output = transformer_outputs[0]
+ logits = self.classifier(output)
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFXLNetForTokenClassificationOutput(
+ loss=loss,
+ logits=logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLNET_START_DOCSTRING,
+)
+class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel, TFQuestionAnsweringLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFXLNetMainLayer(config, name="transformer")
+ self.qa_outputs = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFXLNetForQuestionAnsweringSimpleOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ mems: np.ndarray | tf.Tensor | None = None,
+ perm_mask: np.ndarray | tf.Tensor | None = None,
+ target_mapping: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ input_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFXLNetForQuestionAnsweringSimpleOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = transformer_outputs[0]
+
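+ # qa_outputs emits two logits per token; split them into per-position start/end scores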
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFXLNetForQuestionAnsweringSimpleOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/modeling_xlnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/modeling_xlnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..78ca545751a4afef20d5c08be32329d84c206e06
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/modeling_xlnet.py
@@ -0,0 +1,2083 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ PyTorch XLNet model.
+"""
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
+from ...pytorch_utils import apply_chunking_to_forward
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_xlnet import XLNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "xlnet/xlnet-base-cased"
+_CONFIG_FOR_DOC = "XLNetConfig"
+
+
+from ..deprecated._archive_maps import XLNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
+ """
+ A map of modules from TF to PyTorch. I use a map to keep the PyTorch model as identical to the original PyTorch
+ model as possible.
+ """
+
+ tf_to_pt_map = {}
+
+ if hasattr(model, "transformer"):
+ if hasattr(model, "lm_loss"):
+ # We will also load the output bias
+ tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
+ if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
+ # We will load also the sequence summary
+ tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
+ tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
+ if (
+ hasattr(model, "logits_proj")
+ and config.finetuning_task is not None
+ and f"model/regression_{config.finetuning_task}/logit/kernel" in tf_weights
+ ):
+ tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/kernel"] = model.logits_proj.weight
+ tf_to_pt_map[f"model/regression_{config.finetuning_task}/logit/bias"] = model.logits_proj.bias
+
+ # Now load the rest of the transformer
+ model = model.transformer
+
+ # Embeddings and output
+ tf_to_pt_map.update(
+ {
+ "model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
+ "model/transformer/mask_emb/mask_emb": model.mask_emb,
+ }
+ )
+
+ # Transformer blocks
+ for i, b in enumerate(model.layer):
+ layer_str = f"model/transformer/layer_{i}/"
+ tf_to_pt_map.update(
+ {
+ layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
+ layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
+ layer_str + "rel_attn/o/kernel": b.rel_attn.o,
+ layer_str + "rel_attn/q/kernel": b.rel_attn.q,
+ layer_str + "rel_attn/k/kernel": b.rel_attn.k,
+ layer_str + "rel_attn/r/kernel": b.rel_attn.r,
+ layer_str + "rel_attn/v/kernel": b.rel_attn.v,
+ layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
+ layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
+ layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
+ layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
+ layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
+ layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
+ }
+ )
+
+ # Relative positioning biases
+ if config.untie_r:
+ r_r_list = []
+ r_w_list = []
+ r_s_list = []
+ seg_embed_list = []
+ for b in model.layer:
+ r_r_list.append(b.rel_attn.r_r_bias)
+ r_w_list.append(b.rel_attn.r_w_bias)
+ r_s_list.append(b.rel_attn.r_s_bias)
+ seg_embed_list.append(b.rel_attn.seg_embed)
+ else:
+ r_r_list = [model.r_r_bias]
+ r_w_list = [model.r_w_bias]
+ r_s_list = [model.r_s_bias]
+ seg_embed_list = [model.seg_embed]
+ tf_to_pt_map.update(
+ {
+ "model/transformer/r_r_bias": r_r_list,
+ "model/transformer/r_w_bias": r_w_list,
+ "model/transformer/r_s_bias": r_s_list,
+ "model/transformer/seg_embed": seg_embed_list,
+ }
+ )
+ return tf_to_pt_map
+
+
+def load_tf_weights_in_xlnet(model, config, tf_path):
+ """Load tf checkpoints in a pytorch model"""
+ try:
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ tf_weights = {}
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ tf_weights[name] = array
+
+ # Build TF to PyTorch weights loading map
+ tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)
+
+ for name, pointer in tf_to_pt_map.items():
+ logger.info(f"Importing {name}")
+ if name not in tf_weights:
+ logger.info(f"{name} not in tf pre-trained weights, skipping")
+ continue
+ array = tf_weights[name]
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+ # which are not required for using the pretrained model
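+ # TF stores dense kernels as (in_features, out_features) while torch.nn.Linear expects
+ # (out_features, in_features), hence the transpose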
+ if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
+ logger.info("Transposing")
+ array = np.transpose(array)
+ if isinstance(pointer, list):
+ # Here we will split the TF weights
+ assert (
+ len(pointer) == array.shape[0]
+ ), f"Pointer length {len(pointer)} and array length {array.shape[0]} mismatched"
+ for i, p_i in enumerate(pointer):
+ arr_i = array[i, ...]
+ try:
+ assert (
+ p_i.shape == arr_i.shape
+ ), f"Pointer shape {p_i.shape} and array shape {arr_i.shape} mismatched"
+ except AssertionError as e:
+ e.args += (p_i.shape, arr_i.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name} for layer {i}")
+ p_i.data = torch.from_numpy(arr_i)
+ else:
+ try:
+ assert (
+ pointer.shape == array.shape
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
+ except AssertionError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ tf_weights.pop(name, None)
+ tf_weights.pop(name + "/Adam", None)
+ tf_weights.pop(name + "/Adam_1", None)
+
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
+ return model
+
+
+class XLNetRelativeAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ if config.d_model % config.n_head != 0:
+ raise ValueError(
+ f"The hidden size ({config.d_model}) is not a multiple of the number of attention "
+ f"heads ({config.n_head}"
+ )
+
+ self.n_head = config.n_head
+ self.d_head = config.d_head
+ self.d_model = config.d_model
+ self.scale = 1 / (config.d_head**0.5)
+
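+ # projection weights are kept as (d_model, n_head, d_head) tensors and applied with
+ # einsum (rather than nn.Linear), mirroring the original TF implementation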
+ self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+ self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+ self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+ self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+ self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
+
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
+
+ self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.dropout)
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ @staticmethod
+ def rel_shift(x, klen=-1):
+ """perform relative shift to form the relative attention score."""
+ x_size = x.shape
+
+ x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
+ x = x[1:, ...]
+ x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
+ # x = x[:, 0:klen, :, :]
+ x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
+
+ return x
+
+ @staticmethod
+ def rel_shift_bnij(x, klen=-1):
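+ # Relative-shift trick: reshape, drop one leading row, reshape back, truncate to klen.
+ # A small worked example (batch/head dims omitted, qlen=2, rlen=4, klen=2):
+ #   [[0, 1, 2, 3],      [[2, 3],
+ #    [4, 5, 6, 7]]  ->   [5, 6]]
+ # each query row ends up aligned with its own window of relative positions.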
+ x_size = x.shape
+
+ x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
+ x = x[:, :, 1:, :]
+ x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
+ # Note: the tensor-slice form was faster in my testing than torch.index_select
+ # However, tracing doesn't like the nature of the slice, and if klen changes
+ # during the run then it'll fail, whereas index_select will be fine.
+ x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
+ # x = x[:, :, :, :klen]
+
+ return x
+
+ def rel_attn_core(
+ self,
+ q_head,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=None,
+ attn_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ """Core relative positional attention operations."""
+
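+ # einsum index convention below: i = qlen, j = klen, b = batch, n = n_head,
+ # d = d_head, s = the two relative-segment slots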
+ # content based attention score
+ ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
+
+ # position based attention score
+ bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
+ bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
+
+ # segment based attention score
+ if seg_mat is None:
+ ef = 0
+ else:
+ ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
+ ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
+
+ # merge attention scores and perform masking
+ attn_score = (ac + bd + ef) * self.scale
+ if attn_mask is not None:
+ # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
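+ # float16 saturates near 6.5e4, so use a large finite constant instead of 1e30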
+ if attn_mask.dtype == torch.float16:
+ attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
+ else:
+ attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
+
+ # attention probability
+ attn_prob = nn.functional.softmax(attn_score, dim=3)
+ attn_prob = self.dropout(attn_prob)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
+
+ # attention output
+ attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
+
+ if output_attentions:
+ return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
+
+ return attn_vec
+
+ def post_attention(self, h, attn_vec, residual=True):
+ """Post-attention processing."""
+ # post-attention projection (back to `d_model`)
+ attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
+
+ attn_out = self.dropout(attn_out)
+ if residual:
+ attn_out = attn_out + h
+ output = self.layer_norm(attn_out)
+
+ return output
+
+ def forward(
+ self,
+ h,
+ g,
+ attn_mask_h,
+ attn_mask_g,
+ r,
+ seg_mat,
+ mems=None,
+ target_mapping=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ if g is not None:
+ # Two-stream attention with relative positional encoding.
+ # content based attention score
+ if mems is not None and mems.dim() > 1:
+ cat = torch.cat([mems, h], dim=0)
+ else:
+ cat = h
+
+ # content-based key head
+ k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
+
+ # content-based value head
+ v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
+
+ # position-based key head
+ k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
+
+ # h-stream
+ # content-stream query head
+ q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
+
+ # core attention ops
+ attn_vec_h = self.rel_attn_core(
+ q_head_h,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=seg_mat,
+ attn_mask=attn_mask_h,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ attn_vec_h, attn_prob_h = attn_vec_h
+
+ # post processing
+ output_h = self.post_attention(h, attn_vec_h)
+
+ # g-stream
+ # query-stream query head
+ q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
+
+ # core attention ops
+ if target_mapping is not None:
+ q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
+ attn_vec_g = self.rel_attn_core(
+ q_head_g,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=seg_mat,
+ attn_mask=attn_mask_g,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ attn_vec_g, attn_prob_g = attn_vec_g
+
+ attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
+ else:
+ attn_vec_g = self.rel_attn_core(
+ q_head_g,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=seg_mat,
+ attn_mask=attn_mask_g,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ attn_vec_g, attn_prob_g = attn_vec_g
+
+ # post processing
+ output_g = self.post_attention(g, attn_vec_g)
+
+ if output_attentions:
+ attn_prob = attn_prob_h, attn_prob_g
+
+ else:
+ # Multi-head attention with relative positional encoding
+ if mems is not None and mems.dim() > 1:
+ cat = torch.cat([mems, h], dim=0)
+ else:
+ cat = h
+
+ # content heads
+ q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
+ k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
+ v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
+
+ # positional heads
+ # type casting for fp16 support
+ k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r)
+
+ # core attention ops
+ attn_vec = self.rel_attn_core(
+ q_head_h,
+ k_head_h,
+ v_head_h,
+ k_head_r,
+ seg_mat=seg_mat,
+ attn_mask=attn_mask_h,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ attn_vec, attn_prob = attn_vec
+
+ # post processing
+ output_h = self.post_attention(h, attn_vec)
+ output_g = None
+
+ outputs = (output_h, output_g)
+ if output_attentions:
+ outputs = outputs + (attn_prob,)
+ return outputs
+
+
+class XLNetFeedForward(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
+ self.layer_1 = nn.Linear(config.d_model, config.d_inner)
+ self.layer_2 = nn.Linear(config.d_inner, config.d_model)
+ self.dropout = nn.Dropout(config.dropout)
+ if isinstance(config.ff_activation, str):
+ self.activation_function = ACT2FN[config.ff_activation]
+ else:
+ self.activation_function = config.ff_activation
+
+ def forward(self, inp):
+ output = inp
+ output = self.layer_1(output)
+ output = self.activation_function(output)
+ output = self.dropout(output)
+ output = self.layer_2(output)
+ output = self.dropout(output)
+ output = self.layer_norm(output + inp)
+ return output
+
+
+class XLNetLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.rel_attn = XLNetRelativeAttention(config)
+ self.ff = XLNetFeedForward(config)
+ self.dropout = nn.Dropout(config.dropout)
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+
+ def forward(
+ self,
+ output_h,
+ output_g,
+ attn_mask_h,
+ attn_mask_g,
+ r,
+ seg_mat,
+ mems=None,
+ target_mapping=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ outputs = self.rel_attn(
+ output_h,
+ output_g,
+ attn_mask_h,
+ attn_mask_g,
+ r,
+ seg_mat,
+ mems=mems,
+ target_mapping=target_mapping,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ output_h, output_g = outputs[:2]
+
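+ # apply the feed-forward block in chunks along the sequence dimension to bound peak
+ # memory; with chunk_size_feed_forward == 0 this is a single plain forward pass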
+ if output_g is not None:
+ output_g = apply_chunking_to_forward(
+ self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_g
+ )
+ output_h = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_h)
+
+ outputs = (output_h, output_g) + outputs[2:] # Add the attentions again if they are there
+ return outputs
+
+ def ff_chunk(self, output_x):
+ output_x = self.ff(output_x)
+ return output_x
+
+
+class XLNetPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = XLNetConfig
+ load_tf_weights = load_tf_weights_in_xlnet
+ base_model_prefix = "transformer"
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, XLNetRelativeAttention):
+ for param in [
+ module.q,
+ module.k,
+ module.v,
+ module.o,
+ module.r,
+ module.r_r_bias,
+ module.r_s_bias,
+ module.r_w_bias,
+ module.seg_embed,
+ ]:
+ param.data.normal_(mean=0.0, std=self.config.initializer_range)
+ elif isinstance(module, XLNetModel):
+ module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
+
+
+@dataclass
+class XLNetModelOutput(ModelOutput):
+ """
+ Output type of [`XLNetModel`].
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`):
+ Sequence of hidden-states at the last layer of the model.
+
+ `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
+ corresponds to `sequence_length`.
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetLMHeadModelOutput(ModelOutput):
+ """
+ Output type of [`XLNetLMHeadModel`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+
+ `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
+ corresponds to `sequence_length`.
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: Optional[torch.FloatTensor] = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForSequenceClassificationOutput(ModelOutput):
+ """
+ Output type of [`XLNetForSequenceClassification`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if `config.num_labels == 1`) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if `config.num_labels == 1`) scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: Optional[torch.FloatTensor] = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForTokenClassificationOutput(ModelOutput):
+ """
+ Output type of [`XLNetForTokenClassification`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: Optional[torch.FloatTensor] = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForMultipleChoiceOutput(ModelOutput):
+ """
+ Output type of [`XLNetForMultipleChoice`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors. (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: Optional[torch.FloatTensor] = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForQuestionAnsweringSimpleOutput(ModelOutput):
+ """
+ Output type of [`XLNetForQuestionAnsweringSimple`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`):
+ Span-start scores (before SoftMax).
+ end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,)`):
+ Span-end scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the
+            self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_logits: torch.FloatTensor = None
+ end_logits: torch.FloatTensor = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class XLNetForQuestionAnsweringOutput(ModelOutput):
+ """
+ Output type of [`XLNetForQuestionAnswering`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
+ Classification loss as the sum of start token, end token (and is_impossible if provided) classification
+ losses.
+ start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+            Log probabilities for the top `config.start_n_top` start token possibilities (beam-search).
+        start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+            Indices for the top `config.start_n_top` start token possibilities (beam-search).
+ end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
+ (beam-search).
+ end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
+ cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Log probabilities for the `is_impossible` label of the answers.
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
+ token ids which have their past given to this model should not be passed as `input_ids` as they have
+ already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the
+            self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_top_log_probs: Optional[torch.FloatTensor] = None
+ start_top_index: Optional[torch.LongTensor] = None
+ end_top_log_probs: Optional[torch.FloatTensor] = None
+ end_top_index: Optional[torch.LongTensor] = None
+ cls_logits: Optional[torch.FloatTensor] = None
+ mems: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+XLNET_START_DOCSTRING = r"""
+
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+    heads, etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
+    usage and behavior.
+
+ Parameters:
+ config ([`XLNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+XLNET_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+            Contains pre-computed hidden-states (see `mems` output below). Can be used to speed up sequential
+            decoding. The token ids which have their past given to this model should not be passed as `input_ids` as
+            they have already been computed.
+
+ `use_mems` has to be set to `True` to make use of `mems`.
+ perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
+ Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:
+
+            - if `perm_mask[k, i, j] = 0`, i attends to j in batch k;
+            - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.
+
+ If not set, each token attends to all the others (full bidirectional attention). Only used during
+ pretraining (to define factorization order) or for sequential decoding (generation).
+ target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
+            Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th prediction in
+            batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential
+            decoding (generation).
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+        input_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for
+ real tokens and 1 for padding which is kept for compatibility with the original code base.
+
+ Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **masked**,
+ - 0 for tokens that are **not masked**.
+
+            You can only use one of `input_mask` and `attention_mask`.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
+ XLNET_START_DOCSTRING,
+)
+class XLNetModel(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.mem_len = config.mem_len
+ self.reuse_len = config.reuse_len
+ self.d_model = config.d_model
+ self.same_length = config.same_length
+ self.attn_type = config.attn_type
+ self.bi_data = config.bi_data
+ self.clamp_len = config.clamp_len
+ self.n_layer = config.n_layer
+
+ self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
+ self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
+ self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
+ self.dropout = nn.Dropout(config.dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embedding
+
+ def set_input_embeddings(self, new_embeddings):
+ self.word_embedding = new_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ raise NotImplementedError
+
+ def create_mask(self, qlen, mlen):
+ """
+ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
+
+ Args:
+ qlen: Sequence length
+ mlen: Mask length
+
+ ::
+
+                  same_length=False:      same_length=True:
+                     <    qlen    >          <    qlen    >
+               ^ [0 0 0 0 0 1 1 1 1]     [0 0 0 0 0 1 1 1 1]
+                 [0 0 0 0 0 0 1 1 1]     [1 0 0 0 0 0 1 1 1]
+            qlen [0 0 0 0 0 0 0 1 1]     [1 1 0 0 0 0 0 1 1]
+                 [0 0 0 0 0 0 0 0 1]     [1 1 1 0 0 0 0 0 1]
+               v [0 0 0 0 0 0 0 0 0]     [1 1 1 1 0 0 0 0 0]
+
+ """
+ mask = torch.ones((qlen, qlen + mlen), device=self.device)
+ if self.same_length:
+ mask_lo = mask[:, :qlen].tril(-1)
+ mask.triu_(mlen + 1)
+ mask[:, :qlen] += mask_lo
+ else:
+ mask.triu_(mlen + 1)
+
+ return mask
+
+ def cache_mem(self, curr_out, prev_mem):
+ # cache hidden states into memory.
+ if self.reuse_len is not None and self.reuse_len > 0:
+ curr_out = curr_out[: self.reuse_len]
+
+ if self.mem_len is None or self.mem_len == 0:
+ # If `use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time
+ # and returns all of the past and current hidden states.
+ cutoff = 0
+ else:
+ # If `use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden
+ # states. This is the preferred setting for training and long-form generation.
+ cutoff = -self.mem_len
+ if prev_mem is None:
+ # if `use_mems` is active and `mem_len` is defined, the model
+ new_mem = curr_out[cutoff:]
+ else:
+ new_mem = torch.cat([prev_mem, curr_out], dim=0)[cutoff:]
+
+ return new_mem.detach()
+
+ @staticmethod
+ def positional_embedding(pos_seq, inv_freq, bsz=None):
+ sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
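+        # sinusoid_inp[i, d] = pos_seq[i] * inv_freq[d]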
+ pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
+ pos_emb = pos_emb[:, None, :]
+
+ if bsz is not None:
+ pos_emb = pos_emb.expand(-1, bsz, -1)
+
+ return pos_emb
+
+ def relative_positional_encoding(self, qlen, klen, bsz=None):
+ # create relative positional encoding.
+ freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.int64).float()
+ inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
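+        # inv_freq[k] = 1 / 10000**(2k / d_model): the standard sinusoidal frequency schedule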
+
+ if self.attn_type == "bi":
+ # beg, end = klen - 1, -qlen
+ beg, end = klen, -qlen
+ elif self.attn_type == "uni":
+ # beg, end = klen - 1, -1
+ beg, end = klen, -1
+ else:
+ raise ValueError(f"Unknown `attn_type` {self.attn_type}.")
+
+ if self.bi_data:
+ fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float()
+ bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.int64).float()
+
+ if self.clamp_len > 0:
+ fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
+ bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
+
+ if bsz is not None:
+ fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
+ bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
+ else:
+ fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
+ bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
+
+ pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
+ else:
+ fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.int64).float()
+ if self.clamp_len > 0:
+ fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
+ pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
+
+ return pos_emb
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+        **kwargs,  # delete after deprecation warning is removed
+ ) -> Union[Tuple, XLNetModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if "use_cache" in kwargs:
+ warnings.warn(
+ "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems`"
+ " instead.",
+ FutureWarning,
+ )
+ use_mems = kwargs["use_cache"]
+
+ if self.training:
+ use_mems = use_mems if use_mems is not None else self.config.use_mems_train
+ else:
+ use_mems = use_mems if use_mems is not None else self.config.use_mems_eval
+
+ # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
+ # but we want a unified interface in the library with the batch size on the first dimension
+ # so we move here the first dimension (batch) to the end
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_ids = input_ids.transpose(0, 1).contiguous()
+ qlen, bsz = input_ids.shape[0], input_ids.shape[1]
+ elif inputs_embeds is not None:
+ inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
+ qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
+ input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
+ attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
+ perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
+ target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
+
+ mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
+ klen = mlen + qlen
+
+ dtype_float = self.dtype
+ device = self.device
+
+ # Attention mask
+ # causal attention mask
+ if self.attn_type == "uni":
+ attn_mask = self.create_mask(qlen, mlen)
+ attn_mask = attn_mask[:, :, None, None]
+ elif self.attn_type == "bi":
+ attn_mask = None
+ else:
+ raise ValueError(f"Unsupported attention type: {self.attn_type}")
+
+ # data mask: input mask & perm mask
+        assert input_mask is None or attention_mask is None, (
+            "You can only use one of input_mask (uses 1 for padding) "
+            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
+        )
+ if input_mask is None and attention_mask is not None:
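+            # attention_mask marks real tokens with 1; input_mask is its complement (1 = padding)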
+ input_mask = 1.0 - attention_mask
+ if input_mask is not None and perm_mask is not None:
+ data_mask = input_mask[None] + perm_mask
+ elif input_mask is not None and perm_mask is None:
+ data_mask = input_mask[None]
+ elif input_mask is None and perm_mask is not None:
+ data_mask = perm_mask
+ else:
+ data_mask = None
+
+ if data_mask is not None:
+ # all mems can be attended to
+ if mlen > 0:
+ mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
+ data_mask = torch.cat([mems_mask, data_mask], dim=1)
+ if attn_mask is None:
+ attn_mask = data_mask[:, :, :, None]
+ else:
+ attn_mask += data_mask[:, :, :, None]
+
+ if attn_mask is not None:
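+            # the causal, input and permutation masks have been summed; any positive entry means "masked"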
+ attn_mask = (attn_mask > 0).to(dtype_float)
+
+ if attn_mask is not None:
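+            # subtracting the identity clears the diagonal, so each token can attend to itself in the
+            # content (h) stream even when the permutation mask blocks it for the query (g) stream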
+ non_tgt_mask = -torch.eye(qlen).to(attn_mask)
+ if mlen > 0:
+ non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
+ non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
+ else:
+ non_tgt_mask = None
+
+ # Word embeddings and prepare h & g hidden states
+ if inputs_embeds is not None:
+ word_emb_k = inputs_embeds
+ else:
+ word_emb_k = self.word_embedding(input_ids)
+ output_h = self.dropout(word_emb_k)
+ if target_mapping is not None:
+ word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
+ # else: # We removed the inp_q input which was same as target mapping
+ # inp_q_ext = inp_q[:, :, None]
+ # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
+ output_g = self.dropout(word_emb_q)
+ else:
+ output_g = None
+
+ # Segment embedding
+ if token_type_ids is not None:
+ # Convert `token_type_ids` to one-hot `seg_mat`
+ if mlen > 0:
+ mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
+ cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
+ else:
+ cat_ids = token_type_ids
+
+ # `1` indicates not in the same segment [qlen x klen x bsz]
+ seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
+ seg_mat = nn.functional.one_hot(seg_mat, num_classes=2).to(dtype_float)
+ else:
+ seg_mat = None
+
+ # Positional encoding
+ pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
+ pos_emb = pos_emb.to(output_h.device)
+ pos_emb = self.dropout(pos_emb)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
+ if head_mask is not None:
+ if head_mask.dim() == 1:
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
+ head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
+ elif head_mask.dim() == 2:
+ head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
+ head_mask = head_mask.to(
+ dtype=next(self.parameters()).dtype
+ ) # switch to float if need + fp16 compatibility
+ else:
+ head_mask = [None] * self.n_layer
+
+ new_mems = ()
+ if mems is None:
+ mems = [None] * len(self.layer)
+
+ attentions = [] if output_attentions else None
+ hidden_states = [] if output_hidden_states else None
+ for i, layer_module in enumerate(self.layer):
+ if use_mems:
+ # cache new mems
+ new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
+ if output_hidden_states:
+ hidden_states.append((output_h, output_g) if output_g is not None else output_h)
+
+ outputs = layer_module(
+ output_h,
+ output_g,
+ attn_mask_h=non_tgt_mask,
+ attn_mask_g=attn_mask,
+ r=pos_emb,
+ seg_mat=seg_mat,
+ mems=mems[i],
+ target_mapping=target_mapping,
+ head_mask=head_mask[i],
+ output_attentions=output_attentions,
+ )
+ output_h, output_g = outputs[:2]
+ if output_attentions:
+ attentions.append(outputs[2])
+
+ # Add last hidden state
+ if output_hidden_states:
+ hidden_states.append((output_h, output_g) if output_g is not None else output_h)
+
+ output = self.dropout(output_g if output_g is not None else output_h)
+
+ # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
+ output = output.permute(1, 0, 2).contiguous()
+
+ if not use_mems:
+ new_mems = None
+
+ if output_hidden_states:
+ if output_g is not None:
+ hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
+ else:
+ hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
+
+ if output_attentions:
+ if target_mapping is not None:
+ # when target_mapping is provided, there are 2-tuple of attentions
+ attentions = tuple(
+ tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
+ )
+ else:
+ attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
+
+ if not return_dict:
+ return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None)
+
+ return XLNetModelOutput(
+ last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions
+ )
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings).
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetLMHeadModel(XLNetPreTrainedModel):
+ _tied_weights_keys = ["lm_loss.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.attn_type = config.attn_type
+ self.same_length = config.same_length
+
+ self.transformer = XLNetModel(config)
+ self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_loss
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_loss = new_embeddings
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_mems=None, **kwargs):
+ # Add dummy token at the end (no attention on this one)
+
+ effective_batch_size = input_ids.shape[0]
+ dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
+
+        # At every pass, the attention values for the new token and the two last generated tokens
+        # are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would have
+        # offset = 1; offset = 2 appears to work slightly better in practice.
+ offset = 2
+
+ if past_key_values:
+ input_ids = torch.cat([input_ids[:, -offset:], dummy_token], dim=1)
+ else:
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
+
+ # Build permutation mask so that previous tokens don't see last token
+ sequence_length = input_ids.shape[1]
+ perm_mask = torch.zeros(
+ (effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
+ )
+ perm_mask[:, :, -1] = 1.0
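+        # perm_mask[b, i, j] = 1.0 blocks query i from attending to token j, so no token can see the
+        # appended dummy position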
+
+ # We'll only predict the last token
+ target_mapping = torch.zeros(
+ (effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
+ )
+ target_mapping[:, 0, -1] = 1.0
+
+ inputs = {
+ "input_ids": input_ids,
+ "perm_mask": perm_mask,
+ "target_mapping": target_mapping,
+ "use_mems": use_mems,
+ }
+
+ # if past is defined in model kwargs then use it for faster decoding
+ if past_key_values:
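+            # drop the last `offset` cached positions: they correspond to tokens re-fed to the model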
+ inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past_key_values)
+
+ return inputs
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=XLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetLMHeadModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, num_predict)`, *optional*):
+ Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If
+ `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
+
+            The labels should correspond to the masked input words that should be predicted and depend on
+            `target_mapping`. Note that in order to perform standard auto-regressive language modeling, a *<mask>*
+            token has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples
+            below).
+
+            Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100` are ignored; the
+            loss is only computed for labels in `[0, ..., config.vocab_size]`.
+
+ Return:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLNetLMHeadModel
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-large-cased")
+ >>> model = XLNetLMHeadModel.from_pretrained("xlnet/xlnet-large-cased")
+
+        >>> # We show how to set up inputs to predict a next token using a bi-directional context.
+        >>> input_ids = torch.tensor(
+        ...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
+        ... ).unsqueeze(0)  # We will predict the masked token
+        >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
+        >>> perm_mask[:, :, -1] = 1.0  # Previous tokens don't see the last token
+        >>> # Shape [1, 1, seq_length] => let's predict one token
+        >>> target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)
+        >>> # Our first (and only) prediction will be the last token of the sequence (the masked token)
+        >>> target_mapping[0, 0, -1] = 1.0
+
+ >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
+        >>> # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
+        >>> next_token_logits = outputs[0]
+
+        >>> # XLNetLMHeadModel can be trained the same way, with standard auto-regressive language modeling.
+        >>> input_ids = torch.tensor(
+        ...     tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)
+        ... ).unsqueeze(0)  # We will predict the masked token
+        >>> labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
+        >>> assert labels.shape[0] == 1, "only one word will be predicted"
+        >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
+        >>> # Previous tokens don't see the last token, as in standard auto-regressive LM training
+        >>> perm_mask[:, :, -1] = 1.0
+        >>> # Shape [1, 1, seq_length] => let's predict one token
+        >>> target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)
+        >>> # Our first (and only) prediction will be the last token of the sequence (the masked token)
+        >>> target_mapping[0, 0, -1] = 1.0
+
+ >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
+ >>> loss = outputs.loss
+        >>> # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
+        >>> next_token_logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+
+ logits = self.lm_loss(transformer_outputs[0])
+
+ loss = None
+ if labels is not None:
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return XLNetLMHeadModelOutput(
+ loss=loss,
+ logits=logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ @staticmethod
+ def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:
+ """
+ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
+ generation step.
+ """
+ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g.
+ for GLUE tasks.
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForSequenceClassification(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.transformer = XLNetModel(config)
+ self.sequence_summary = SequenceSummary(config)
+ self.logits_proj = nn.Linear(config.d_model, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetForSequenceClassificationOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForSequenceClassificationOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+ output = transformer_outputs[0]
+
+ output = self.sequence_summary(output)
+ logits = self.logits_proj(output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return XLNetForSequenceClassificationOutput(
+ loss=loss,
+ logits=logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForTokenClassification(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.transformer = XLNetModel(config)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetForTokenClassificationOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForTokenClassificationOutput]:
+ r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels -
+            1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return XLNetForTokenClassificationOutput(
+ loss=loss,
+ logits=logits,
+ mems=outputs.mems,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RACE/SWAG tasks.
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForMultipleChoice(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.transformer = XLNetModel(config)
+ self.sequence_summary = SequenceSummary(config)
+ self.logits_proj = nn.Linear(config.d_model, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetForMultipleChoiceOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForMultipleChoiceOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
+ flat_inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ transformer_outputs = self.transformer(
+ flat_input_ids,
+ token_type_ids=flat_token_type_ids,
+ input_mask=flat_input_mask,
+ attention_mask=flat_attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ head_mask=head_mask,
+ inputs_embeds=flat_inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+
+ output = transformer_outputs[0]
+
+ output = self.sequence_summary(output)
+ logits = self.logits_proj(output)
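+        # logits has shape (batch_size * num_choices, 1); fold the choices back into one row per example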
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels.view(-1))
+
+ if not return_dict:
+ output = (reshaped_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return XLNetForMultipleChoiceOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+    XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
+    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.transformer = XLNetModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=XLNetForQuestionAnsweringSimpleOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForQuestionAnsweringSimpleOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
+            sequence are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
+            sequence are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
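+        # logits has shape (batch_size, seq_len, config.num_labels); with the expected num_labels == 2,
+        # splitting on the last dim yields per-token start and end scores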
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, split add a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return XLNetForQuestionAnsweringSimpleOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ mems=outputs.mems,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+    XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
+    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLNET_START_DOCSTRING,
+)
+class XLNetForQuestionAnswering(XLNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.start_n_top = config.start_n_top
+ self.end_n_top = config.end_n_top
+
+ self.transformer = XLNetModel(config)
+ self.start_logits = PoolerStartLogits(config)
+ self.end_logits = PoolerEndLogits(config)
+ self.answer_class = PoolerAnswerClass(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=XLNetForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ mems: Optional[torch.Tensor] = None,
+ perm_mask: Optional[torch.Tensor] = None,
+ target_mapping: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ input_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ is_impossible: Optional[torch.Tensor] = None,
+ cls_index: Optional[torch.Tensor] = None,
+ p_mask: Optional[torch.Tensor] = None,
+ use_mems: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs, # delete when `use_cache` is removed in XLNetModel
+ ) -> Union[Tuple, XLNetForQuestionAnsweringOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
+            sequence are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
+            sequence are not taken into account for computing the loss.
+        is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for whether a question has an answer or no answer (SQuAD 2.0).
+ cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the classification token to use as input for computing plausibility of the
+ answer.
+ p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means the token should
+            be masked; 0.0 means the token is not masked.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLNetForQuestionAnswering
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("xlnet/xlnet-base-cased")
+ >>> model = XLNetForQuestionAnswering.from_pretrained("xlnet/xlnet-base-cased")
+
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
+ ... 0
+ ... ) # Batch size 1
+ >>> start_positions = torch.tensor([1])
+ >>> end_positions = torch.tensor([3])
+ >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
+
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ mems=mems,
+ perm_mask=perm_mask,
+ target_mapping=target_mapping,
+ token_type_ids=token_type_ids,
+ input_mask=input_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_mems=use_mems,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs,
+ )
+ hidden_states = transformer_outputs[0]
+ start_logits = self.start_logits(hidden_states, p_mask=p_mask)
+
+        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it
+
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, let's remove the dimension added by batch splitting
+ for x in (start_positions, end_positions, cls_index, is_impossible):
+ if x is not None and x.dim() > 1:
+ x.squeeze_(-1)
+
+ # during training, compute the end logits based on the ground truth of the start position
+ end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
+
+ loss_fct = CrossEntropyLoss()
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if cls_index is not None and is_impossible is not None:
+ # Predict answerability from the representation of CLS and START
+ cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
+ loss_fct_cls = nn.BCEWithLogitsLoss()
+ cls_loss = loss_fct_cls(cls_logits, is_impossible)
+
+ # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
+ total_loss += cls_loss * 0.5
+
+ if not return_dict:
+ return (total_loss,) + transformer_outputs[1:]
+ else:
+ return XLNetForQuestionAnsweringOutput(
+ loss=total_loss,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ else:
+ # during inference, compute the end logits based on beam search
+ bsz, slen, hsz = hidden_states.size()
+ start_log_probs = nn.functional.softmax(start_logits, dim=-1) # shape (bsz, slen)
+
+ start_top_log_probs, start_top_index = torch.topk(
+ start_log_probs, self.start_n_top, dim=-1
+ ) # shape (bsz, start_n_top)
+ start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
+ start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
+ start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
+
+ hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
+ start_states
+ ) # shape (bsz, slen, start_n_top, hsz)
+ p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
+ end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
+ end_log_probs = nn.functional.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
+
+ end_top_log_probs, end_top_index = torch.topk(
+ end_log_probs, self.end_n_top, dim=1
+ ) # shape (bsz, end_n_top, start_n_top)
+ end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
+ end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
+
+ start_states = torch.einsum(
+ "blh,bl->bh", hidden_states, start_log_probs
+ ) # get the representation of START as weighted sum of hidden states
+ cls_logits = self.answer_class(
+ hidden_states, start_states=start_states, cls_index=cls_index
+ ) # Shape (batch size,): one single `cls_logits` for each sample
+
+ if not return_dict:
+ outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
+ return outputs + transformer_outputs[1:]
+ else:
+ return XLNetForQuestionAnsweringOutput(
+ start_top_log_probs=start_top_log_probs,
+ start_top_index=start_top_index,
+ end_top_log_probs=end_top_log_probs,
+ end_top_index=end_top_index,
+ cls_logits=cls_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/tokenization_xlnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/tokenization_xlnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d87f34ba2462e44c1286f70a9a122267c890c14
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/tokenization_xlnet.py
@@ -0,0 +1,383 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization classes for XLNet model."""
+
+
+import os
+import unicodedata
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import SPIECE_UNDERLINE, logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
+
+
+# Segments (not really needed)
+SEG_ID_A = 0
+SEG_ID_B = 1
+SEG_ID_CLS = 2
+SEG_ID_SEP = 3
+SEG_ID_PAD = 4
+
+
+class XLNetTokenizer(PreTrainedTokenizer):
+ """
+ Construct an XLNet tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether to lowercase the input when tokenizing.
+ remove_space (`bool`, *optional*, defaults to `True`):
+ Whether to strip the text when tokenizing (removing excess spaces before and after the string).
+ keep_accents (`bool`, *optional*, defaults to `False`):
+ Whether to keep accents when tokenizing.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
+            token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the beginning of
+            sequence. The token used is the `cls_token`.
+
+            </Tip>
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+            The token used is the `sep_token`.
+
+            </Tip>
+
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
+            this token instead.
+        sep_token (`str`, *optional*, defaults to `"<sep>"`):
+            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
+            for sequence classification or for a text and a question for question answering. It is also used as the
+            last token of a sequence built with special tokens.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+        cls_token (`str`, *optional*, defaults to `"<cls>"`):
+            The classifier token which is used when doing sequence classification (classification of the whole
+            sequence instead of per-token classification). It is the first token of the sequence when built with
+            special tokens.
+        mask_token (`str`, *optional*, defaults to `"<mask>"`):
+            The token used for masking values. This is the token used when training this model with masked language
+            modeling. This is the token which the model will try to predict.
+        additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
+            Additional special tokens used by the tokenizer.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+              - `nbest_size < 0`: assumes `nbest_size` is infinite and samples from all hypotheses (the lattice)
+                using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
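+
+            For example, subword regularization can be enabled like this (a sketch; the sampling values shown are
+            illustrative, not recommended settings):
+
+            ```python
+            >>> from transformers import XLNetTokenizer
+
+            >>> tokenizer = XLNetTokenizer.from_pretrained(
+            ...     "xlnet/xlnet-base-cased",
+            ...     sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
+            ... )  # doctest: +SKIP
+            ```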
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ padding_side = "left"
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=False,
+ remove_space=True,
+ keep_accents=False,
+        bos_token="<s>",
+        eos_token="</s>",
+        unk_token="<unk>",
+        sep_token="<sep>",
+        pad_token="<pad>",
+        cls_token="<cls>",
+        mask_token="<mask>",
+        additional_special_tokens=["<eop>", "<eod>"],
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+        # Mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+ self.vocab_file = vocab_file
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ remove_space=remove_space,
+ keep_accents=keep_accents,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ additional_special_tokens=additional_special_tokens,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ self._pad_token_type_id = 3
+
+ @property
+ def vocab_size(self):
+ return len(self.sp_model)
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ def preprocess_text(self, inputs):
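+        # Normalization applied before SentencePiece, mirroring the original XLNet preprocessing: collapse runs of
+        # whitespace, unify ``/'' quotes into ", then optionally strip accents (NFKD) and lowercase.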
+ if self.remove_space:
+ outputs = " ".join(inputs.strip().split())
+ else:
+ outputs = inputs
+ outputs = outputs.replace("``", '"').replace("''", '"')
+
+ if not self.keep_accents:
+ outputs = unicodedata.normalize("NFKD", outputs)
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
+ if self.do_lower_case:
+ outputs = outputs.lower()
+
+ return outputs
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Tokenize a string."""
+ text = self.preprocess_text(text)
+ pieces = self.sp_model.encode(text, out_type=str)
+ new_pieces = []
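+        # SentencePiece can glue a trailing comma onto a preceding digit (e.g. "9,"). Re-encode such pieces without
+        # the comma, trim or drop a spurious leading underline on the first sub-piece, then re-append the comma as
+        # its own piece.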
+ for piece in pieces:
+            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
+ if len(cur_pieces[0]) == 1:
+ cur_pieces = cur_pieces[1:]
+ else:
+ cur_pieces[0] = cur_pieces[0][1:]
+ cur_pieces.append(piece[-1])
+ new_pieces.extend(cur_pieces)
+ else:
+ new_pieces.append(piece)
+
+ return new_pieces
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.PieceToId(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.sp_model.IdToPiece(index)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ def _decode(
+ self,
+ token_ids: List[int],
+ skip_special_tokens: bool = False,
+        clean_up_tokenization_spaces: Optional[bool] = None,
+ spaces_between_special_tokens: bool = True,
+ **kwargs,
+ ) -> str:
+ self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
+
+ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
+
+        # To avoid mixing byte-level and unicode for byte-level BPE
+ # we need to build string separately for added tokens and byte-level tokens
+ # cf. https://github.com/huggingface/transformers/issues/1133
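+        # Added tokens are emitted verbatim as standalone chunks, while runs of SentencePiece pieces in between are
+        # rejoined with `convert_tokens_to_string`.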
+ sub_texts = []
+ current_sub_text = []
+ for token in filtered_tokens:
+            if skip_special_tokens and token in self.all_special_tokens:
+ continue
+ if token in self.added_tokens_encoder:
+ if current_sub_text:
+ sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+ current_sub_text = []
+ sub_texts.append(token)
+ else:
+ current_sub_text.append(token)
+ if current_sub_text:
+ sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+
+ # Mimic the behavior of the Rust tokenizer:
+ # By default, there are no spaces between special tokens
+ text = "".join(sub_texts)
+
+ clean_up_tokenization_spaces = (
+ clean_up_tokenization_spaces
+ if clean_up_tokenization_spaces is not None
+ else self.clean_up_tokenization_spaces
+ )
+ if clean_up_tokenization_spaces:
+ clean_text = self.clean_up_tokenization(text)
+ return clean_text
+ else:
+ return text
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. An XLNet sequence has the following format:
+
+        - single sequence: `X <sep> <cls>`
+        - pair of sequences: `A <sep> B <sep> <cls>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
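+
+        Example (a minimal sketch; the IDs are placeholders and `tokenizer` is assumed to be an instantiated
+        `XLNetTokenizer`):
+
+        ```python
+        >>> ids_a, ids_b = [5, 6], [7, 8]
+        >>> tokenizer.build_inputs_with_special_tokens(ids_a)  # ids_a + [sep] + [cls]  # doctest: +SKIP
+        >>> tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)  # ids_a + [sep] + ids_b + [sep] + [cls]  # doctest: +SKIP
+        ```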
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return token_ids_0 + sep + cls
+ return token_ids_0 + sep + token_ids_1 + sep + cls
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
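+
+        For instance, with placeholder IDs and an instantiated tokenizer (a sketch; the mask depends only on the
+        sequence lengths):
+
+        ```python
+        >>> tokenizer.get_special_tokens_mask([5, 6], [7, 8])
+        [0, 0, 1, 0, 0, 1, 1]
+        ```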
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
+ return ([0] * len(token_ids_0)) + [1, 1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
+ sequence pair mask has the following format:
+
+ ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 2
+        | first sequence    | second sequence     |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
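+
+        For example, with placeholder IDs (a sketch; the result depends only on the sequence lengths, with the
+        trailing `2` marking the CLS segment):
+
+        ```python
+        >>> tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
+        [0, 0, 0, 1, 1, 1, 2]
+        ```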
+ """
+ sep = [self.sep_token_id]
+ cls_segment_id = [2]
+
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0] + cls_segment_id
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
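+            # No SentencePiece file exists on disk (e.g. the tokenizer was built from a serialized model), so write
+            # the in-memory model out instead of copying a file.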
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/tokenization_xlnet_fast.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/tokenization_xlnet_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..d77307e7a3dfbac9dd42fcf9eb0aa053cabca5ca
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlnet/tokenization_xlnet_fast.py
@@ -0,0 +1,232 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization classes for XLNet model."""
+
+
+import os
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import AddedToken
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import is_sentencepiece_available, logging
+
+
+if is_sentencepiece_available():
+ from .tokenization_xlnet import XLNetTokenizer
+else:
+ XLNetTokenizer = None
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
+
+
+SPIECE_UNDERLINE = "▁"
+
+# Segments (not really needed)
+SEG_ID_A = 0
+SEG_ID_B = 1
+SEG_ID_CLS = 2
+SEG_ID_SEP = 3
+SEG_ID_PAD = 4
+
+
+class XLNetTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" XLNet tokenizer (backed by HuggingFace's *tokenizers* library). Based on
+ [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+        do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether to lowercase the input when tokenizing.
+ remove_space (`bool`, *optional*, defaults to `True`):
+ Whether to strip the text when tokenizing (removing excess spaces before and after the string).
+ keep_accents (`bool`, *optional*, defaults to `False`):
+ Whether to keep accents when tokenizing.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
+            token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the beginning of
+            sequence. The token used is the `cls_token`.
+
+            </Tip>
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+            The token used is the `sep_token`.
+
+            </Tip>
+
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+        sep_token (`str`, *optional*, defaults to `"<sep>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+        cls_token (`str`, *optional*, defaults to `"<cls>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+        mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+        additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
+ Additional special tokens used by the tokenizer.
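+
+        Example (a minimal sketch; `xlnet/xlnet-base-cased` is the public XLNet checkpoint on the Hugging Face Hub):
+
+        ```python
+        >>> from transformers import XLNetTokenizerFast
+
+        >>> tokenizer = XLNetTokenizerFast.from_pretrained("xlnet/xlnet-base-cased")  # doctest: +SKIP
+        >>> tokenizer("A sentence to tokenize.")  # doctest: +SKIP
+        ```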
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ padding_side = "left"
+ slow_tokenizer_class = XLNetTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=False,
+ remove_space=True,
+ keep_accents=False,
+        bos_token="<s>",
+        eos_token="</s>",
+        unk_token="<unk>",
+        sep_token="<sep>",
+        pad_token="<pad>",
+        cls_token="<cls>",
+        mask_token="<mask>",
+        additional_special_tokens=["<eop>", "<eod>"],
+ **kwargs,
+ ):
+        # Mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ super().__init__(
+ vocab_file=vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ remove_space=remove_space,
+ keep_accents=keep_accents,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ additional_special_tokens=additional_special_tokens,
+ **kwargs,
+ )
+
+ self._pad_token_type_id = 3
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+ self.vocab_file = vocab_file
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
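+        # A matching slow tokenizer can only be rebuilt if the original SentencePiece vocabulary file is still
+        # available on disk.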
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. An XLNet sequence has the following format:
+
+        - single sequence: `X <sep> <cls>`
+        - pair of sequences: `A <sep> B <sep> <cls>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return token_ids_0 + sep + cls
+ return token_ids_0 + sep + token_ids_1 + sep + cls
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
+ sequence pair mask has the following format:
+
+ ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 2
+        | first sequence    | second sequence     |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls_segment_id = [2]
+
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0] + cls_segment_id
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)