diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f87bfdea532d61d4bc63802eced65f108328e666
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_autoformer": [
+ "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "AutoformerConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_autoformer"] = [
+ "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "AutoformerForPrediction",
+ "AutoformerModel",
+ "AutoformerPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_autoformer import (
+ AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ AutoformerConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_autoformer import (
+ AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ AutoformerForPrediction,
+ AutoformerModel,
+ AutoformerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
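+
+# Note (illustrative, not part of the upstream file): with this lazy-module
+# pattern, `from transformers.models.autoformer import AutoformerConfig` only
+# imports `configuration_autoformer` on first attribute access, which keeps the
+# top-level `import transformers` cheap even with hundreds of model subpackages.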
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1ce89620035d50db1c4e1878763cddec62f94f2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__init__.py
@@ -0,0 +1,73 @@
+# Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_codegen": ["CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "CodeGenConfig", "CodeGenOnnxConfig"],
+ "tokenization_codegen": ["CodeGenTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_codegen_fast"] = ["CodeGenTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_codegen"] = [
+ "CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "CodeGenForCausalLM",
+ "CodeGenModel",
+ "CodeGenPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_codegen import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, CodeGenConfig, CodeGenOnnxConfig
+ from .tokenization_codegen import CodeGenTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_codegen_fast import CodeGenTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_codegen import (
+ CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST,
+ CodeGenForCausalLM,
+ CodeGenModel,
+ CodeGenPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3889c0f2741687a7064de057a54119c58897027
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e2b80bb548857a747e1fbe8775ed80c6cf9b9fb7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c6e5efda671f8022060b303ef84fddba5b2fcd7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a94f7c238512b706ca0bba96f132713766894fb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4deac36d2efafec0a15114e5277a41ddba8251d2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py
new file mode 100644
index 0000000000000000000000000000000000000000..73c019870f1f6a4d305489de5f762adf351917c8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py
@@ -0,0 +1,242 @@
+# coding=utf-8
+# Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" CodeGen model configuration"""
+from collections import OrderedDict
+from typing import Any, List, Mapping, Optional
+
+from ... import PreTrainedTokenizer, TensorType, is_torch_available
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfigWithPast, PatchingSpec
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
+ "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
+ "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
+ "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
+ "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
+ "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
+ "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
+ "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
+ "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
+ "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
+ "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
+}
+
+
+class CodeGenConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`CodeGenModel`]. It is used to instantiate a
+ CodeGen model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the CodeGen
+ [Salesforce/codegen-2B-mono](https://huggingface.co/Salesforce/codegen-2B-mono) architecture. Configuration objects
+ inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
+ [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50400):
+ Vocabulary size of the CodeGen model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`CodeGenModel`].
+ n_positions (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ n_ctx (`int`, *optional*, defaults to 2048):
+ This attribute is used in `CodeGenModel.__init__` without any real effect.
+ n_embd (`int`, *optional*, defaults to 4096):
+ Dimensionality of the embeddings and hidden states.
+ n_layer (`int`, *optional*, defaults to 28):
+ Number of hidden layers in the Transformer encoder.
+ n_head (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ rotary_dim (`int`, *optional*, defaults to 64):
+ Number of dimensions in the embedding that Rotary Position Embedding is applied to.
+ n_inner (`int`, *optional*):
+            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        embd_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the embeddings.
+ attn_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention.
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+ The epsilon to use in the layer normalization layers.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ bos_token_id (`int`, *optional*, defaults to 50256):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 50256):
+ End of stream token id.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
+            model has an output word embedding layer.
+
+ Example:
+
+ ```python
+ >>> from transformers import CodeGenConfig, CodeGenModel
+
+ >>> # Initializing a CodeGen 6B configuration
+ >>> configuration = CodeGenConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = CodeGenModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "codegen"
+ attribute_map = {
+ "max_position_embeddings": "n_positions",
+ "hidden_size": "n_embd",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=50400,
+ n_positions=2048,
+ n_ctx=2048,
+ n_embd=4096,
+ n_layer=28,
+ n_head=16,
+ rotary_dim=64,
+ n_inner=None,
+ activation_function="gelu_new",
+ resid_pdrop=0.0,
+ embd_pdrop=0.0,
+ attn_pdrop=0.0,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ use_cache=True,
+ bos_token_id=50256,
+ eos_token_id=50256,
+ tie_word_embeddings=False,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.n_ctx = n_ctx
+ self.n_positions = n_positions
+ self.n_embd = n_embd
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.n_inner = n_inner
+ self.rotary_dim = rotary_dim
+ self.activation_function = activation_function
+ self.resid_pdrop = resid_pdrop
+ self.embd_pdrop = embd_pdrop
+ self.attn_pdrop = attn_pdrop
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_range = initializer_range
+ self.use_cache = use_cache
+
+ self.bos_token_id = bos_token_id
+ self.eos_token_id = eos_token_id
+
+ super().__init__(
+ bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
+ )
+
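+# Illustrative sketch (not part of the upstream file): the 350M checkpoints use a
+# smaller geometry than these 2B defaults (exact values are assumptions; check the
+# released config.json files):
+#
+#   small_cfg = CodeGenConfig(n_embd=1024, n_layer=20, n_head=16, rotary_dim=32)
+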
+
+# Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
+class CodeGenOnnxConfig(OnnxConfigWithPast):
+ def __init__(
+ self,
+ config: PretrainedConfig,
+ task: str = "default",
+ patching_specs: List[PatchingSpec] = None,
+ use_past: bool = False,
+ ):
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
+ if not getattr(self._config, "pad_token_id", None):
+ # TODO: how to do that better?
+ self._config.pad_token_id = 0
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
+ else:
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
+
+ return common_inputs
+
+ @property
+ def num_layers(self) -> int:
+ return self._config.n_layer
+
+ @property
+ def num_attention_heads(self) -> int:
+ return self._config.n_head
+
+ def generate_dummy_inputs(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+        # We need to order the inputs in the way they appear in forward()
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
+
+ # Need to add the past_keys
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+
+ batch, seqlen = common_inputs["input_ids"].shape
+ # Not using the same length for past_key_values
+ past_key_values_length = seqlen + 2
+ past_shape = (
+ batch,
+ self.num_attention_heads,
+ past_key_values_length,
+ self._config.hidden_size // self.num_attention_heads,
+ )
+ ordered_inputs["past_key_values"] = [
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
+ ]
+
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
+ if self.use_past:
+ mask_dtype = ordered_inputs["attention_mask"].dtype
+ ordered_inputs["attention_mask"] = torch.cat(
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
+ )
+
+ return ordered_inputs
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 13
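+
+
+# Illustrative usage sketch (not part of the upstream file):
+#
+#   from transformers import AutoTokenizer, TensorType
+#
+#   config = CodeGenConfig.from_pretrained("Salesforce/codegen-350M-mono")
+#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
+#   onnx_config = CodeGenOnnxConfig(config, use_past=True)
+#   dummy = onnx_config.generate_dummy_inputs(
+#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
+#   )
+#   # -> input_ids of shape (2, 8), n_layer past key/value pairs of shape
+#   #    (2, n_head, 10, head_dim), and an attention_mask of shape (2, 18)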
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py
new file mode 100644
index 0000000000000000000000000000000000000000..f37ceccaace988a78e1b72499e01d81912e5dd83
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py
@@ -0,0 +1,733 @@
+# coding=utf-8
+# Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch CodeGen model."""
+
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_codegen import CodeGenConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "Salesforce/codegen-2B-mono"
+_CONFIG_FOR_DOC = "CodeGenConfig"
+
+
+CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "Salesforce/codegen-350M-nl",
+ "Salesforce/codegen-350M-multi",
+ "Salesforce/codegen-350M-mono",
+ "Salesforce/codegen-2B-nl",
+ "Salesforce/codegen-2B-multi",
+ "Salesforce/codegen-2B-mono",
+ "Salesforce/codegen-6B-nl",
+ "Salesforce/codegen-6B-multi",
+ "Salesforce/codegen-6B-mono",
+ "Salesforce/codegen-16B-nl",
+ "Salesforce/codegen-16B-multi",
+ "Salesforce/codegen-16B-mono",
+ # See all CodeGen models at https://huggingface.co/models?filter=codegen
+]
+
+
+# Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions
+def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
+ sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
+ return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
+
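+# Sketch (illustrative, not part of the upstream file): for num_pos=2048 and
+# dim=64, the returned table has shape (2048, 64); the first 32 columns hold
+# sin(pos / 10000^(2i/dim)) and the last 32 the matching cos, so callers split it:
+#
+#   pos_table = create_sinusoidal_positions(2048, 64)
+#   sin, cos = torch.split(pos_table, 32, dim=-1)  # each of shape (2048, 32)
+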
+
+# Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
+def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
+ x1 = x[:, :, :, ::2]
+ x2 = x[:, :, :, 1::2]
+ x = torch.stack((-x2, x1), dim=-1)
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
+
+
+# Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
+def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
+ sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
+ cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
+ return (tensor * cos) + (rotate_every_two(tensor) * sin)
+
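+# Sketch (illustrative, not part of the upstream file): rotate_every_two maps
+# [a, b, c, d] on the last dim to [-b, a, -d, c], so for each feature pair
+# (x1, x2) apply_rotary_pos_emb computes (x1 * cos - x2 * sin, x2 * cos + x1 * sin),
+# i.e. a plain 2-D rotation of each pair by the per-position angle.
+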
+
+class CodeGenAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ max_positions = config.max_position_embeddings
+ self.register_buffer(
+ "causal_mask",
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
+ 1, 1, max_positions, max_positions
+ ),
+ persistent=False,
+ )
+
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
+
+ self.embed_dim = config.hidden_size
+ self.num_attention_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_attention_heads
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
+ f" `num_attention_heads`: {self.num_attention_heads})."
+ )
+ self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
+ self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
+
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+ self.rotary_dim = config.rotary_dim
+ pos_embd_dim = self.rotary_dim or self.embed_dim
+ self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
+
+ def _split_heads(self, x, n_head, dim_head, mp_num):
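+        # Shape note (illustrative, not in the upstream file): with the default
+        # n_head=16, dim_head=256 and mp_num=4, an input of shape
+        # (batch, seq, 4, 1024) is reshaped to (batch, seq, 4, 4, 256) and then
+        # flattened back to (batch, seq, 16, 256), i.e. (batch, seq, n_head, dim_head).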
+ reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
+ reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
+ return reshaped
+
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
+ """
+        Merges the attn_head_size and num_attention_heads dimensions into hidden_size
+ """
+ if len(tensor.shape) == 5:
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
+ elif len(tensor.shape) == 4:
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
+ else:
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
+ return tensor.view(new_shape)
+
+ def _attn(
+ self,
+ query,
+ key,
+ value,
+ attention_mask=None,
+ head_mask=None,
+ ):
+ # compute causal mask from causal mask buffer
+ query_length, key_length = query.size(-2), key.size(-2)
+ causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]
+
+ # Keep the attention weights computation in fp32 to avoid overflow issues
+ query = query.to(torch.float32)
+ key = key.to(torch.float32)
+
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+ attn_weights = attn_weights / self.scale_attn
+ mask_value = torch.finfo(attn_weights.dtype).min
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ attn_weights = attn_weights + attention_mask
+
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
+ attn_weights = attn_weights.to(value.dtype)
+ attn_weights = self.attn_dropout(attn_weights)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_weights = attn_weights * head_mask
+
+ attn_output = torch.matmul(attn_weights, value)
+
+ return attn_output, attn_weights
+
+ def forward(
+ self,
+ hidden_states: Optional[torch.FloatTensor],
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> Union[
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
+ ]:
+ qkv = self.qkv_proj(hidden_states)
+ # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
+ mp_num = 4
+ qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
+
+ local_dim = self.head_dim * self.num_attention_heads // mp_num
+ query, value, key = torch.split(qkv_split, local_dim, dim=-1)
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
+
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
+ value = value.permute(0, 2, 1, 3)
+
+ embed_positions = self.embed_positions
+ if embed_positions.device != position_ids.device:
+ embed_positions = embed_positions.to(position_ids.device)
+ self.embed_positions = embed_positions
+
+ sincos = embed_positions[position_ids]
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
+
+ if self.rotary_dim is not None:
+ k_rot = key[:, :, :, : self.rotary_dim]
+ k_pass = key[:, :, :, self.rotary_dim :]
+
+ q_rot = query[:, :, :, : self.rotary_dim]
+ q_pass = query[:, :, :, self.rotary_dim :]
+
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
+
+ key = torch.cat([k_rot, k_pass], dim=-1)
+ query = torch.cat([q_rot, q_pass], dim=-1)
+ else:
+ key = apply_rotary_pos_emb(key, sin, cos)
+ query = apply_rotary_pos_emb(query, sin, cos)
+
+ key = key.permute(0, 2, 1, 3)
+ query = query.permute(0, 2, 1, 3)
+
+ if layer_past is not None:
+ past_key = layer_past[0]
+ past_value = layer_past[1]
+ key = torch.cat((past_key, key), dim=-2)
+ value = torch.cat((past_value, value), dim=-2)
+
+ if use_cache is True:
+            # Note that this cast is quite ugly but is not applied before RoPE, as k_rot in the original codebase is always in fp32.
+ # Reference: https://github.com/salesforce/CodeGen/blob/f210c3bb1216c975ad858cd4132c0fdeabf4bfc2/codegen1/jaxformer/hf/codegen/modeling_codegen.py#L38
+ present = (key.to(hidden_states.dtype), value)
+ else:
+ present = None
+
+ # compute self-attention: V x Softmax(QK^T)
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
+
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
+ attn_output = self.out_proj(attn_output)
+ attn_output = self.resid_dropout(attn_output)
+
+ outputs = (attn_output, present)
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs # a, present, (attentions)
+
+
+# Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->CodeGen
+class CodeGenMLP(nn.Module):
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
+ super().__init__()
+ embed_dim = config.n_embd
+
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
+
+ self.act = ACT2FN[config.activation_function]
+ self.dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
+ hidden_states = self.fc_in(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.fc_out(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->CodeGen
+class CodeGenBlock(nn.Module):
+ # Ignore copy
+ def __init__(self, config):
+ super().__init__()
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+ self.attn = CodeGenAttention(config)
+ self.mlp = CodeGenMLP(inner_dim, config)
+
+ def forward(
+ self,
+ hidden_states: Optional[torch.FloatTensor],
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
+ residual = hidden_states
+ hidden_states = self.ln_1(hidden_states)
+ attn_outputs = self.attn(
+ hidden_states=hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
+ outputs = attn_outputs[1:]
+
+ feed_forward_hidden_states = self.mlp(hidden_states)
+ hidden_states = attn_output + feed_forward_hidden_states + residual
+
+ if use_cache:
+ outputs = (hidden_states,) + outputs
+ else:
+ outputs = (hidden_states,) + outputs[1:]
+
+ return outputs # hidden_states, present, (attentions)
+
+
+class CodeGenPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = CodeGenConfig
+ base_model_prefix = "transformer"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["CodeGenBlock"]
+ _skip_keys_device_placement = "past_key_values"
+
+ def __init__(self, *inputs, **kwargs):
+ super().__init__(*inputs, **kwargs)
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear,)):
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+CODEGEN_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`CodeGenConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+CODEGEN_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare CodeGen Model transformer outputting raw hidden-states without any specific head on top.",
+ CODEGEN_START_DOCSTRING,
+)
+class CodeGenModel(CodeGenPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.embed_dim = config.n_embd
+ self.vocab_size = config.vocab_size
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
+ self.drop = nn.Dropout(config.embd_pdrop)
+ self.h = nn.ModuleList([CodeGenBlock(config) for _ in range(config.n_layer)])
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
+ self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
+
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.wte
+
+ def set_input_embeddings(self, new_embeddings):
+ self.wte = new_embeddings
+
+ @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ batch_size = input_ids.shape[0]
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ batch_size = inputs_embeds.shape[0]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
+
+ if past_key_values is None:
+ past_length = 0
+ past_key_values = tuple([None] * len(self.h))
+ else:
+ past_length = past_key_values[0][0].size(-2)
+
+ if position_ids is None:
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
+ position_ids = position_ids.unsqueeze(0)
+
+ # Attention mask.
+ if attention_mask is not None:
+ if batch_size <= 0:
+ raise ValueError("batch_size has to be defined and > 0")
+ attention_mask = attention_mask.view(batch_size, -1)
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+            # this attention mask is simpler than the triangular masking of causal attention
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+ attention_mask = attention_mask[:, None, None, :]
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and the dtype's smallest value for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x num_attention_heads x N x N
+ # head_mask has shape n_layer x batch x num_attention_heads x N x N
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.wte(input_ids)
+
+ hidden_states = inputs_embeds
+
+ if token_type_ids is not None:
+ token_type_embeds = self.wte(token_type_ids)
+ hidden_states = hidden_states + token_type_embeds
+
+ hidden_states = self.drop(hidden_states)
+
+ output_shape = input_shape + (hidden_states.size(-1),)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
+ "`use_cache=False`..."
+ )
+ use_cache = False
+
+ presents = () if use_cache else None
+ all_self_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ outputs = self._gradient_checkpointing_func(
+ block.__call__,
+ hidden_states,
+ None,
+ attention_mask,
+ position_ids,
+ head_mask[i],
+ use_cache,
+ output_attentions,
+ )
+ else:
+ outputs = block(
+ hidden_states=hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask[i],
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = outputs[0]
+ if use_cache is True:
+ presents = presents + (outputs[1],)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+
+ hidden_states = self.ln_f(hidden_states)
+
+ hidden_states = hidden_states.view(output_shape)
+ # Add last hidden state
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
+
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The CodeGen Model transformer with a language modeling head on top.
+ """,
+ CODEGEN_START_DOCSTRING,
+)
+class CodeGenForCausalLM(CodeGenPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = CodeGenModel(config)
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
+ token_type_ids = kwargs.get("token_type_ids", None)
+ # Omit tokens covered by past_key_values
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
+
+ attention_mask = kwargs.get("attention_mask", None)
+ position_ids = kwargs.get("position_ids", None)
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ return {
+ "input_ids": input_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "position_ids": position_ids,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids,
+ }
+
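+    # Sketch (illustrative, not in the upstream file): for a left-padded batch with
+    # attention_mask = [[0, 1, 1, 1]], `cumsum(-1) - 1` yields [-1, 0, 1, 2] and
+    # `masked_fill_` clamps the padded slot to 1, giving position_ids = [[1, 0, 1, 2]].
+    # The clamped value is arbitrary, since that position is never attended to.
+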
+ @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+
+ # make sure sampling in fp16 works correctly and
+ # compute loss in fp32 to match with mesh-tf version
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
+ lm_logits = self.lm_head(hidden_states).to(torch.float32)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(lm_logits.device)
+ # Shift so that tokens < n predict n
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ loss = loss.to(hidden_states.dtype)
+
+ if not return_dict:
+ output = (lm_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ @staticmethod
+ def _reorder_cache(
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
+ ) -> Tuple[Tuple[torch.Tensor]]:
+ """
+        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
+        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
+ beam_idx at every generation step.
+ """
+ return tuple(
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
+ for layer_past in past_key_values
+ )
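+
+
+# Illustrative usage sketch (not part of the upstream file):
+#
+#   from transformers import AutoTokenizer
+#
+#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
+#   model = CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
+#   inputs = tokenizer("def fib(n):", return_tensors="pt")
+#   out = model.generate(**inputs, max_new_tokens=32, pad_token_id=tokenizer.eos_token_id)
+#   print(tokenizer.decode(out[0]))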
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py
new file mode 100644
index 0000000000000000000000000000000000000000..c79a6d46e4ad34b4f2e02fb7c7ba97decacf156c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py
@@ -0,0 +1,396 @@
+# coding=utf-8
+# Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for CodeGen"""
+
+
+import json
+import os
+from functools import lru_cache
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+import numpy as np
+import regex as re
+
+from ...utils import is_tf_available, is_torch_available, logging, to_py_obj
+
+
+if TYPE_CHECKING:
+ if is_torch_available():
+ import torch
+ if is_tf_available():
+ import tensorflow as tf
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
+ },
+ "merges_file": {
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "Salesforce/codegen-350M-mono": 2048,
+}
+
+
+@lru_cache()
+def bytes_to_unicode():
+ """
+    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to
+    whitespace/control characters that the bpe code barfs on.
+
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
+ tables between utf-8 bytes and unicode strings.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
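+# Sketch (illustrative, not part of the upstream file): printable bytes map to
+# themselves, while bytes the BPE would choke on are shifted past 255; e.g. the
+# space byte 0x20 maps to chr(256 + 32) == "Ġ", which is why GPT-2-style vocabs
+# are full of "Ġ"-prefixed tokens.
+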
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word.
+
+ Word is represented as tuple of symbols (symbols being variable-length strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
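+# Example (illustrative, not part of the upstream file):
+#   get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
+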
+
+class CodeGenTokenizer(PreTrainedTokenizer):
+ """
+ Construct a CodeGen tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
+    be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
+
+ ```python
+ >>> from transformers import CodeGenTokenizer
+
+ >>> tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
+ >>> tokenizer("Hello world")["input_ids"]
+ [15496, 995]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [18435, 995]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+
+    When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The beginning of sequence token.
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The end of sequence token.
+ pad_token (`str`, *optional*):
+ The token used for padding, for example when batching sequences of different lengths.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an initial space to the input. This allows treating the leading word just as any
+            other word. (The CodeGen tokenizer detects the beginning of words by the preceding space.)
+ add_bos_token (`bool`, *optional*, defaults to `False`):
+ Whether to add a beginning of sequence token at the start of sequences.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ errors="replace",
+ unk_token="<|endoftext|>",
+ bos_token="<|endoftext|>",
+ eos_token="<|endoftext|>",
+ pad_token=None,
+ add_prefix_space=False,
+ add_bos_token=False,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
+ self.add_bos_token = add_bos_token
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+ super().__init__(
+ errors=errors,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ pad_token=pad_token,
+ add_prefix_space=add_prefix_space,
+ add_bos_token=add_bos_token,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def bpe(self, token):
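+        # Illustrative walk-through (not in the upstream file): greedily merge the
+        # lowest-ranked adjacent pair until none remains; e.g. with ranks
+        # {("l", "l"): 0, ("ll", "o"): 1}, "hello" collapses as
+        # h e l l o -> h e ll o -> h e llo, and "h e llo" is returned.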
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ if self.add_bos_token:
+ bos_token_ids = [self.bos_token_id]
+ else:
+ bos_token_ids = []
+
+ output = bos_token_ids + token_ids_0
+
+ if token_ids_1 is None:
+ return output
+
+ return output + bos_token_ids + token_ids_1
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+ return bpe_tokens
+
+ def _convert_token_to_id(self, token):
+        """Converts a token (str) into an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+        """Converts an index (integer) into a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (string) into a single string."""
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+ if is_split_into_words or add_prefix_space:
+ text = " " + text
+ return (text, kwargs)
+
+ def decode(
+ self,
+ token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
+ skip_special_tokens: bool = False,
+ clean_up_tokenization_spaces: Optional[bool] = None,
+ truncate_before_pattern: Optional[List[str]] = None,
+ **kwargs,
+ ) -> str:
+ """
+ Converts a sequence of ids into a string using the tokenizer and vocabulary, with options to remove special
+ tokens and clean up tokenization spaces.
+
+ Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
+
+ Args:
+ token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
+ List of tokenized input ids. Can be obtained using the `__call__` method.
+ skip_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to remove special tokens in the decoding.
+ clean_up_tokenization_spaces (`bool`, *optional*):
+ Whether or not to clean up the tokenization spaces. If `None`, will default to
+ `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
+ truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
+ A list of regular expression strings that will be used to truncate the returned string. This can be
+ used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
+ of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
+ kwargs (additional keyword arguments, *optional*):
+ Will be passed to the underlying model specific decode method.
+
+ Returns:
+ `str`: The decoded sentence.
+ """
+
+ token_ids = to_py_obj(token_ids)
+
+ decoded_text = super()._decode(
+ token_ids=token_ids,
+ skip_special_tokens=skip_special_tokens,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+
+ if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
+ decoded_text = self.truncate(decoded_text, truncate_before_pattern)
+
+ return decoded_text
+
+ def truncate(self, completion, truncate_before_pattern):
+ def find_re(string, pattern, start_pos):
+ m = pattern.search(string, start_pos)
+ return m.start() if m else -1
+
+ terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
+
+ prints = list(re.finditer("^print", completion, re.MULTILINE))
+
+ if len(prints) > 1:
+ completion = completion[: prints[1].start()]
+
+ defs = list(re.finditer("^def", completion, re.MULTILINE))
+
+ if len(defs) > 1:
+ completion = completion[: defs[1].start()]
+
+ start_pos = 0
+
+ terminals_pos = [
+ pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
+ ]
+
+ if len(terminals_pos) > 0:
+ return completion[: min(terminals_pos)]
+ else:
+ return completion
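+
+
+if __name__ == "__main__":
+ # Editor's sketch (not part of the upstream module): `truncate` only applies
+ # regular expressions, so it can be exercised on a plain string without any
+ # vocabulary files; calling it unbound with `None` as `self` works because
+ # the method never touches `self`.
+ completion = 'def add(a, b):\n return a + b\n\n\nprint(add(1, 2))\nprint("bye")\n'
+ print(CodeGenTokenizer.truncate(None, completion, ["\n\n\n"]))
+ # Expected: only "def add(a, b):\n return a + b" survives, since the text is
+ # cut before the second `print` statement and before the first "\n\n\n".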
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen_fast.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c2661db396162e90d7587a0e43a2801b60049f4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen_fast.py
@@ -0,0 +1,255 @@
+# coding=utf-8
+# Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for OpenAI GPT."""
+
+
+import json
+import re
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...utils import is_tf_available, is_torch_available, logging
+
+
+if TYPE_CHECKING:
+ if is_torch_available():
+ import torch
+ if is_tf_available():
+ import tensorflow as tf
+
+from tokenizers import pre_tokenizers
+
+from ...tokenization_utils_base import BatchEncoding
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from .tokenization_codegen import CodeGenTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
+ },
+ "merges_file": {
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
+ },
+ "tokenizer_file": {
+ "Salesforce/codegen-350M-mono": (
+ "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
+ ),
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "Salesforce/codegen-350M-mono": 2048,
+}
+
+
+class CodeGenTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
+ Byte-Pair-Encoding.
+
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word
+ will be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
+
+ ```python
+ >>> from transformers import CodeGenTokenizerFast
+
+ >>> tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
+ >>> tokenizer("Hello world")["input_ids"]
+ [15496, 995]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [18435, 995]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
+ the model was not pretrained this way, it might yield a decrease in performance.
+
+ <Tip>
+
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
+
+ </Tip>
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`, *optional*):
+ Path to the vocabulary file.
+ merges_file (`str`, *optional*):
+ Path to the merges file.
+ tokenizer_file (`str`, *optional*):
+ Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
+ contains everything needed to load the tokenizer.
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The beginning of sequence token.
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The end of sequence token.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows the leading word to be treated like any
+ other word. (The CodeGen tokenizer detects the beginning of words by the preceding space.)
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = CodeGenTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+ unk_token="<|endoftext|>",
+ bos_token="<|endoftext|>",
+ eos_token="<|endoftext|>",
+ add_prefix_space=False,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ merges_file,
+ tokenizer_file=tokenizer_file,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ add_prefix_space=add_prefix_space,
+ **kwargs,
+ )
+
+ if kwargs.pop("add_bos_token", False):
+ model_id = kwargs.pop("name_or_path", "")
+ raise ValueError(
+ "Currenty GPT2's fast tokenizer does NOT support adding a BOS token. "
+ "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
+ f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
+ f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
+ "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
+ " so that the fast tokenizer works correctly."
+ )
+
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
+ pre_tok_state["add_prefix_space"] = add_prefix_space
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
+
+ self.add_prefix_space = add_prefix_space
+
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._batch_encode_plus(*args, **kwargs)
+
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._encode_plus(*args, **kwargs)
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
+
+ def decode(
+ self,
+ token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
+ skip_special_tokens: bool = False,
+ clean_up_tokenization_spaces: Optional[bool] = None,
+ truncate_before_pattern: Optional[List[str]] = None,
+ **kwargs,
+ ) -> str:
+ """
+ Converts a sequence of ids into a string using the tokenizer and vocabulary, with options to remove special
+ tokens and clean up tokenization spaces.
+
+ Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
+
+ Args:
+ token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
+ List of tokenized input ids. Can be obtained using the `__call__` method.
+ skip_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to remove special tokens in the decoding.
+ clean_up_tokenization_spaces (`bool`, *optional*):
+ Whether or not to clean up the tokenization spaces. If `None`, will default to
+ `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
+ truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
+ A list of regular expression strings that will be used to truncate the returned string. This can be
+ used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
+ of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
+ kwargs (additional keyword arguments, *optional*):
+ Will be passed to the underlying model specific decode method.
+
+ Returns:
+ `str`: The decoded sentence.
+ """
+
+ decoded_text = super().decode(
+ token_ids=token_ids,
+ skip_special_tokens=skip_special_tokens,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+
+ if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
+ decoded_text = self.truncate(decoded_text, truncate_before_pattern)
+
+ return decoded_text
+
+ def truncate(self, completion, truncate_before_pattern):
+ def find_re(string, pattern, start_pos):
+ m = pattern.search(string, start_pos)
+ return m.start() if m else -1
+
+ terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
+
+ prints = list(re.finditer("^print", completion, re.MULTILINE))
+
+ if len(prints) > 1:
+ completion = completion[: prints[1].start()]
+
+ defs = list(re.finditer("^def", completion, re.MULTILINE))
+
+ if len(defs) > 1:
+ completion = completion[: defs[1].start()]
+
+ start_pos = 0
+
+ terminals_pos = [
+ pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
+ ]
+
+ if len(terminals_pos) > 0:
+ return completion[: min(terminals_pos)]
+ else:
+ return completion
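+
+
+if __name__ == "__main__":
+ # Editor's sketch (not part of the upstream module): a decode round-trip with
+ # truncation. Assumes network access to fetch "Salesforce/codegen-350M-mono"
+ # from the Hugging Face Hub.
+ tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
+ ids = tokenizer("def hello():\n print('hi')")["input_ids"]
+ print(tokenizer.decode(ids, truncate_before_pattern=["\n\n\n"]))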
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..099a7fc9d63da4ef2cbe0308371d7b26d586e447
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__init__.py
@@ -0,0 +1,102 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_torch_available,
+ is_vision_available,
+)
+
+
+_import_structure = {
+ "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
+ _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_convnext"] = [
+ "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ConvNextForImageClassification",
+ "ConvNextModel",
+ "ConvNextPreTrainedModel",
+ "ConvNextBackbone",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_convnext"] = [
+ "TFConvNextForImageClassification",
+ "TFConvNextModel",
+ "TFConvNextPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_convnext import ConvNextFeatureExtractor
+ from .image_processing_convnext import ConvNextImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_convnext import (
+ CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ConvNextBackbone,
+ ConvNextForImageClassification,
+ ConvNextModel,
+ ConvNextPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
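+
+# Editor's note: because of the `_LazyModule` indirection above, importing this
+# package is cheap; torch-, TensorFlow- and vision-backed symbols are only
+# resolved (and their heavy dependencies imported) on first attribute access,
+# e.g. `from transformers.models.convnext import ConvNextModel`.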
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d9ac4f03b2d5ce827c7727b8982f70bdc87b12e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/configuration_convnext.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..316d6480f824e7f7f5dfae73d1d4c1c9afd42802
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/__pycache__/convert_convnext_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..48647bd1224ecd11d8116de172095cb120622a38
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/configuration_convnext.py
@@ -0,0 +1,144 @@
+# coding=utf-8
+# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" ConvNeXT model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "facebook/convnext-tiny-224": "https://huggingface.co/facebook/convnext-tiny-224/resolve/main/config.json",
+ # See all ConvNeXT models at https://huggingface.co/models?filter=convnext
+}
+
+
+class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate a
+ ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the ConvNeXT
+ [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ patch_size (`int`, *optional*, defaults to 4):
+ Patch size to use in the patch embedding layer.
+ num_stages (`int`, *optional*, defaults to 4):
+ The number of stages in the model.
+ hidden_sizes (`List[int]`, *optional*, defaults to [96, 192, 384, 768]):
+ Dimensionality (hidden size) at each stage.
+ depths (`List[int]`, *optional*, defaults to [3, 3, 9, 3]):
+ Depth (number of blocks) for each stage.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
+ `"selu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ layer_scale_init_value (`float`, *optional*, defaults to 1e-6):
+ The initial value for the layer scale.
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
+ The drop rate for stochastic depth.
+ out_features (`List[str]`, *optional*):
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ out_indices (`List[int]`, *optional*):
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+
+ Example:
+ ```python
+ >>> from transformers import ConvNextConfig, ConvNextModel
+
+ >>> # Initializing a ConvNext convnext-tiny-224 style configuration
+ >>> configuration = ConvNextConfig()
+
+ >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration
+ >>> model = ConvNextModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "convnext"
+
+ def __init__(
+ self,
+ num_channels=3,
+ patch_size=4,
+ num_stages=4,
+ hidden_sizes=None,
+ depths=None,
+ hidden_act="gelu",
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ layer_scale_init_value=1e-6,
+ drop_path_rate=0.0,
+ image_size=224,
+ out_features=None,
+ out_indices=None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.num_channels = num_channels
+ self.patch_size = patch_size
+ self.num_stages = num_stages
+ self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
+ self.depths = [3, 3, 9, 3] if depths is None else depths
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.layer_scale_init_value = layer_scale_init_value
+ self.drop_path_rate = drop_path_rate
+ self.image_size = image_size
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+ )
+
+
+class ConvNextOnnxConfig(OnnxConfig):
+ torch_onnx_minimum_version = version.parse("1.11")
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-5
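+
+
+if __name__ == "__main__":
+ # Editor's sketch (not upstream code): `out_features` is aligned against
+ # `stage_names` by `get_aligned_output_features_output_indices`, so the
+ # matching indices can be recovered from the stage names.
+ config = ConvNextConfig(out_features=["stage2", "stage4"])
+ print(config.stage_names) # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
+ print(config.out_indices) # expected: [2, 4]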
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdcbf24d552389ba34f55c8fa1af717aa26dd60f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/convert_convnext_to_pytorch.py
@@ -0,0 +1,243 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert ConvNext checkpoints from the original repository.
+
+URL: https://github.com/facebookresearch/ConvNeXt"""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import ConvNextConfig, ConvNextForImageClassification, ConvNextImageProcessor
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_convnext_config(checkpoint_url):
+ config = ConvNextConfig()
+
+ if "tiny" in checkpoint_url:
+ depths = [3, 3, 9, 3]
+ hidden_sizes = [96, 192, 384, 768]
+ if "small" in checkpoint_url:
+ depths = [3, 3, 27, 3]
+ hidden_sizes = [96, 192, 384, 768]
+ if "base" in checkpoint_url:
+ depths = [3, 3, 27, 3]
+ hidden_sizes = [128, 256, 512, 1024]
+ if "large" in checkpoint_url:
+ depths = [3, 3, 27, 3]
+ hidden_sizes = [192, 384, 768, 1536]
+ if "xlarge" in checkpoint_url:
+ depths = [3, 3, 27, 3]
+ hidden_sizes = [256, 512, 1024, 2048]
+
+ if "1k" in checkpoint_url:
+ num_labels = 1000
+ filename = "imagenet-1k-id2label.json"
+ expected_shape = (1, 1000)
+ else:
+ num_labels = 21841
+ filename = "imagenet-22k-id2label.json"
+ expected_shape = (1, 21841)
+
+ repo_id = "huggingface/label-files"
+ config.num_labels = num_labels
+ with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
+ id2label = json.load(f)
+ id2label = {int(k): v for k, v in id2label.items()}
+ if "1k" not in checkpoint_url:
+ # this dataset contains 21843 labels but the model only has 21841
+ # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
+ del id2label[9205]
+ del id2label[15027]
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ config.hidden_sizes = hidden_sizes
+ config.depths = depths
+
+ return config, expected_shape
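+
+# For example (editor's note), a ".../convnext_base_22k_224.pth" URL yields
+# depths [3, 3, 27, 3], hidden sizes [128, 256, 512, 1024] and a 21841-class
+# ImageNet-22k head.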
+
+
+def rename_key(name):
+ if "downsample_layers.0.0" in name:
+ name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
+ if "downsample_layers.0.1" in name:
+ name = name.replace("downsample_layers.0.1", "embeddings.norm") # we rename to layernorm later on
+ if "downsample_layers.1.0" in name:
+ name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0")
+ if "downsample_layers.1.1" in name:
+ name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1")
+ if "downsample_layers.2.0" in name:
+ name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0")
+ if "downsample_layers.2.1" in name:
+ name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1")
+ if "downsample_layers.3.0" in name:
+ name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0")
+ if "downsample_layers.3.1" in name:
+ name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1")
+ if "stages" in name and "downsampling_layer" not in name:
+ # stages.0.0. for instance should be renamed to stages.0.layers.0.
+ name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :]
+ if "stages" in name:
+ name = name.replace("stages", "encoder.stages")
+ if "norm" in name:
+ name = name.replace("norm", "layernorm")
+ if "gamma" in name:
+ name = name.replace("gamma", "layer_scale_parameter")
+ if "head" in name:
+ name = name.replace("head", "classifier")
+
+ return name
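+
+# Example (editor's note): "stages.0.0.dwconv.weight" becomes
+# "encoder.stages.0.layers.0.dwconv.weight", while "downsample_layers.1.0.weight"
+# becomes "encoder.stages.1.downsampling_layer.0.weight".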
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+@torch.no_grad()
+def convert_convnext_checkpoint(checkpoint_url, pytorch_dump_folder_path):
+ """
+ Copy/paste/tweak model's weights to our ConvNext structure.
+ """
+
+ # define ConvNext configuration based on URL
+ config, expected_shape = get_convnext_config(checkpoint_url)
+ # load original state_dict from URL
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
+ # rename keys
+ for key in state_dict.copy().keys():
+ val = state_dict.pop(key)
+ state_dict[rename_key(key)] = val
+ # add prefix to all keys except classifier head
+ for key in state_dict.copy().keys():
+ val = state_dict.pop(key)
+ if not key.startswith("classifier"):
+ key = "convnext." + key
+ state_dict[key] = val
+
+ # load HuggingFace model
+ model = ConvNextForImageClassification(config)
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ # Check outputs on an image, prepared by ConvNextImageProcessor
+ size = 224 if "224" in checkpoint_url else 384
+ image_processor = ConvNextImageProcessor(size=size)
+ pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
+
+ logits = model(pixel_values).logits
+
+ # note: the logits below were obtained without center cropping
+ if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth":
+ expected_logits = torch.tensor([-0.1210, -0.6605, 0.1918])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth":
+ expected_logits = torch.tensor([-0.4473, -0.1847, -0.6365])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth":
+ expected_logits = torch.tensor([0.4525, 0.7539, 0.0308])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth":
+ expected_logits = torch.tensor([0.3561, 0.6350, -0.0384])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth":
+ expected_logits = torch.tensor([0.4174, -0.0989, 0.1489])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth":
+ expected_logits = torch.tensor([0.2513, -0.1349, -0.1613])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth":
+ expected_logits = torch.tensor([1.2980, 0.3631, -0.1198])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth":
+ expected_logits = torch.tensor([1.2963, 0.1227, 0.1723])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth":
+ expected_logits = torch.tensor([1.7956, 0.8390, 0.2820])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth":
+ expected_logits = torch.tensor([-0.2822, -0.0502, -0.0878])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth":
+ expected_logits = torch.tensor([-0.5672, -0.0730, -0.4348])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth":
+ expected_logits = torch.tensor([0.2681, 0.2365, 0.6246])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth":
+ expected_logits = torch.tensor([-0.2642, 0.3931, 0.5116])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth":
+ expected_logits = torch.tensor([-0.6677, -0.1873, -0.8379])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth":
+ expected_logits = torch.tensor([-0.7749, -0.2967, -0.6444])
+ else:
+ raise ValueError(f"Unknown URL: {checkpoint_url}")
+
+ assert torch.allclose(logits[0, :3], expected_logits, atol=1e-3)
+ assert logits.shape == expected_shape
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+ print("Pushing model to the hub...")
+ model_name = "convnext"
+ if "tiny" in checkpoint_url:
+ model_name += "-tiny"
+ elif "small" in checkpoint_url:
+ model_name += "-small"
+ elif "base" in checkpoint_url:
+ model_name += "-base"
+ elif "xlarge" in checkpoint_url:
+ model_name += "-xlarge"
+ elif "large" in checkpoint_url:
+ model_name += "-large"
+ if "224" in checkpoint_url:
+ model_name += "-224"
+ elif "384" in checkpoint_url:
+ model_name += "-384"
+ if "22k" in checkpoint_url and "1k" not in checkpoint_url:
+ model_name += "-22k"
+ if "22k" in checkpoint_url and "1k" in checkpoint_url:
+ model_name += "-22k-1k"
+
+ model.push_to_hub(
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+ organization="nielsr",
+ commit_message="Add model",
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
+ type=str,
+ help="URL of the original ConvNeXT checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the output PyTorch model directory.",
+ )
+
+ args = parser.parse_args()
+ convert_convnext_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
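+
+# Example invocation (editor's note), using the default checkpoint URL above:
+# python convert_convnext_to_pytorch.py \
+# --checkpoint_url https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth \
+# --pytorch_dump_folder_path ./convnext-tiny-224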
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..92b8a8f4fba82fb72b83384d2cbcb6abfe773ea2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/feature_extraction_convnext.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for ConvNeXT."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_convnext import ConvNextImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class ConvNextFeatureExtractor(ConvNextImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use ConvNextImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..54060105f59eb264af6d2ee5c58c8308e0a8fa49
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/image_processing_convnext.py
@@ -0,0 +1,338 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for ConvNeXT."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ center_crop,
+ get_resize_output_image_size,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+class ConvNextImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a ConvNeXT image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
+ by `do_resize` in the `preprocess` method.
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 384}`):
+ Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is
+ resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will
+ be matched to `int(size["shortest_edge"] / crop_pct)`, after which the image is cropped to
+ `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can
+ be overridden by `size` in the `preprocess` method.
+ crop_pct (`float`, *optional*, defaults to 224 / 256):
+ Percentage of the image to crop. Only has an effect if `do_resize` is `True` and `size["shortest_edge"]`
+ < 384. Can be overridden by `crop_pct` in the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+ the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+ method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats with length equal to the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats with length equal to
+ the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
+ method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ crop_pct: float = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"shortest_edge": 384}
+ size = get_size_dict(size, default_to_square=False)
+
+ self.do_resize = do_resize
+ self.size = size
+ # Default value set here for backwards compatibility where the value in config is None
+ self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "crop_pct",
+ "resample",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ crop_pct: float,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
+ `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
+ Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
+ after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
+ crop_pct (`float`):
+ Percentage of the image to crop. Only has an effect if `size["shortest_edge"]` < 384.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
+ image.
+ """
+ size = get_size_dict(size, default_to_square=False)
+ if "shortest_edge" not in size:
+ raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
+ shortest_edge = size["shortest_edge"]
+
+ if shortest_edge < 384:
+ # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
+ resize_shortest_edge = int(shortest_edge / crop_pct)
+ resize_size = get_resize_output_image_size(
+ image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format
+ )
+ image = resize(
+ image=image,
+ size=resize_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+ # then crop to (shortest_edge, shortest_edge)
+ return center_crop(
+ image=image,
+ size=(shortest_edge, shortest_edge),
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+ else:
+ # warping (no cropping) when evaluated at 384 or larger
+ return resize(
+ image,
+ size=(shortest_edge, shortest_edge),
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
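+
+ # Example (editor's note): with size={"shortest_edge": 224} and the default
+ # crop_pct of 224 / 256, the shorter side is first resized to
+ # int(224 / (224 / 256)) = 256 and the image is then center-cropped to
+ # 224x224; at 384 and above the image is warped straight to the square
+ # target size with no crop.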
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ crop_pct: float = None,
+ resample: PILImageResampling = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
+ is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
+ image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
+ `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
+ crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
+ Percentage of the image to crop if `size["shortest_edge"]` < 384.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters. Only
+ has an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values to the [0, 1] range.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ crop_pct = crop_pct if crop_pct is not None else self.crop_pct
+ resample = resample if resample is not None else self.resample
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(
+ image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format
+ )
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
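+
+
+if __name__ == "__main__":
+ # Editor's sketch (not upstream code): run the full pipeline on a random
+ # channels-first uint8 image; a (1, 3, 224, 224) batch is expected back.
+ image = np.random.randint(0, 256, (3, 500, 400), dtype=np.uint8)
+ processor = ConvNextImageProcessor(size={"shortest_edge": 224})
+ print(processor(images=image, return_tensors="np")["pixel_values"].shape)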
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..a952e5d8165e158394419dd4ad6fcb8b32c82b63
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_convnext.py
@@ -0,0 +1,553 @@
+# coding=utf-8
+# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch ConvNext model."""
+
+
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BackboneOutput,
+ BaseModelOutputWithNoAttention,
+ BaseModelOutputWithPoolingAndNoAttention,
+ ImageClassifierOutputWithNoAttention,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.backbone_utils import BackboneMixin
+from .configuration_convnext import ConvNextConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "ConvNextConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
+_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "facebook/convnext-tiny-224"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "facebook/convnext-tiny-224",
+ # See all ConvNext models at https://huggingface.co/models?filter=convnext
+]
+
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
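+
+# Editor's note: dividing by `keep_prob` rescales the surviving samples so the
+# expected value of the output equals the input; with drop_prob=0.1, roughly
+# 10% of the samples in a batch are zeroed and the rest are scaled by 1/0.9.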
+
+
+# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNext
+class ConvNextDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class ConvNextLayerNorm(nn.Module):
+ r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
+ width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
+ """
+
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
+ self.eps = eps
+ self.data_format = data_format
+ if self.data_format not in ["channels_last", "channels_first"]:
+ raise NotImplementedError(f"Unsupported data format: {self.data_format}")
+ self.normalized_shape = (normalized_shape,)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ if self.data_format == "channels_last":
+ x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+ elif self.data_format == "channels_first":
+ input_dtype = x.dtype
+ x = x.float()
+ u = x.mean(1, keepdim=True)
+ s = (x - u).pow(2).mean(1, keepdim=True)
+ x = (x - u) / torch.sqrt(s + self.eps)
+ x = x.to(dtype=input_dtype)
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
+ return x
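+
+ # Editor's note: the channels_first branch above is a manual layer norm over
+ # dim 1, computed in float32 for numerical stability and cast back; it is
+ # mathematically equivalent to permuting to channels_last, applying
+ # torch.nn.functional.layer_norm, and permuting back.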
+
+
+class ConvNextEmbeddings(nn.Module):
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
+ found in src/transformers/models/swin/modeling_swin.py.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.patch_embeddings = nn.Conv2d(
+ config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
+ )
+ self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
+ self.num_channels = config.num_channels
+
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
+ num_channels = pixel_values.shape[1]
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ embeddings = self.patch_embeddings(pixel_values)
+ embeddings = self.layernorm(embeddings)
+ return embeddings
+
+
+class ConvNextLayer(nn.Module):
+ """This corresponds to the `Block` class in the original implementation.
+
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv], all
+ in (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear], Permute
+ back.
+
+ The authors used (2) as they find it slightly faster in PyTorch.
+
+ Args:
+ config ([`ConvNextConfig`]): Model configuration class.
+ dim (`int`): Number of input channels.
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
+ """
+
+ def __init__(self, config, dim, drop_path=0):
+ super().__init__()
+ self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
+ self.layernorm = ConvNextLayerNorm(dim, eps=1e-6)
+ self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
+ self.act = ACT2FN[config.hidden_act]
+ self.pwconv2 = nn.Linear(4 * dim, dim)
+ self.layer_scale_parameter = (
+ nn.Parameter(config.layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+ if config.layer_scale_init_value > 0
+ else None
+ )
+ self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
+ input = hidden_states
+ x = self.dwconv(hidden_states)
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
+ x = self.layernorm(x)
+ x = self.pwconv1(x)
+ x = self.act(x)
+ x = self.pwconv2(x)
+ if self.layer_scale_parameter is not None:
+ x = self.layer_scale_parameter * x
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
+
+ x = input + self.drop_path(x)
+ return x
+
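+# Shape flow through one block, as a sketch (C = dim):
+#   (N, C, H, W) --dwconv--> (N, C, H, W) --permute--> (N, H, W, C) --layernorm-->
+#   (N, H, W, C) --pwconv1--> (N, H, W, 4C) --act, pwconv2--> (N, H, W, C)
+#   --[layer scale], permute back--> (N, C, H, W) --residual add (+ drop path)--> output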
+
+class ConvNextStage(nn.Module):
+ """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.
+
+ Args:
+ config ([`ConvNextConfig`]): Model configuration class.
+ in_channels (`int`): Number of input channels.
+ out_channels (`int`): Number of output channels.
+ depth (`int`): Number of residual blocks.
+ drop_path_rates (`List[float]`): Stochastic depth rates for each layer.
+ """
+
+ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
+ super().__init__()
+
+ if in_channels != out_channels or stride > 1:
+ self.downsampling_layer = nn.Sequential(
+ ConvNextLayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
+ nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
+ )
+ else:
+ self.downsampling_layer = nn.Identity()
+ drop_path_rates = drop_path_rates or [0.0] * depth
+ self.layers = nn.Sequential(
+ *[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
+ )
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
+ hidden_states = self.downsampling_layer(hidden_states)
+ hidden_states = self.layers(hidden_states)
+ return hidden_states
+
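+# With the default kernel_size=2, stride=2 a stage halves the spatial resolution,
+# e.g. (N, in_channels, H, W) -> (N, out_channels, H/2, W/2), and the residual
+# blocks that follow are shape-preserving (a sketch; the first stage uses stride 1).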
+
+class ConvNextEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.stages = nn.ModuleList()
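+ # Stochastic depth decay rule: drop rates increase linearly from 0 to
+ # config.drop_path_rate across all blocks and are then split into one list per stage.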
+ drop_path_rates = [
+ x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
+ ]
+ prev_chs = config.hidden_sizes[0]
+ for i in range(config.num_stages):
+ out_chs = config.hidden_sizes[i]
+ stage = ConvNextStage(
+ config,
+ in_channels=prev_chs,
+ out_channels=out_chs,
+ stride=2 if i > 0 else 1,
+ depth=config.depths[i],
+ drop_path_rates=drop_path_rates[i],
+ )
+ self.stages.append(stage)
+ prev_chs = out_chs
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, layer_module in enumerate(self.stages):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ hidden_states = layer_module(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
+
+ return BaseModelOutputWithNoAttention(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ )
+
+
+class ConvNextPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ConvNextConfig
+ base_model_prefix = "convnext"
+ main_input_name = "pixel_values"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+CONVNEXT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+CONVNEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`ConvNextImageProcessor.__call__`] for details.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ConvNext model outputting raw features without any specific head on top.",
+ CONVNEXT_START_DOCSTRING,
+)
+class ConvNextModel(ConvNextPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = ConvNextEmbeddings(config)
+ self.encoder = ConvNextEncoder(config)
+
+ # final layernorm layer
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ embedding_output = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ # global average pooling, (N, C, H, W) -> (N, C)
+ pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndNoAttention(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """
+ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+ ImageNet.
+ """,
+ CONVNEXT_START_DOCSTRING,
+)
+class ConvNextForImageClassification(ConvNextPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.convnext = ConvNextModel(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutputWithNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
+
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
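+ # infer the problem type from the number of labels and the label dtype when the
+ # config does not set one explicitly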
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutputWithNoAttention(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """
+ ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.
+ """,
+ CONVNEXT_START_DOCSTRING,
+)
+class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin):
+ def __init__(self, config):
+ super().__init__(config)
+ super()._init_backbone(config)
+
+ self.embeddings = ConvNextEmbeddings(config)
+ self.encoder = ConvNextEncoder(config)
+ self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
+
+ # Add layer norms to hidden states of out_features
+ hidden_states_norms = {}
+ for stage, num_channels in zip(self._out_features, self.channels):
+ hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format="channels_first")
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
+
+ # initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
+ >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224")
+
+ >>> inputs = processor(image, return_tensors="pt")
+ >>> outputs = model(**inputs)
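+ >>> feature_maps = outputs.feature_maps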
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ embedding_output = self.embeddings(pixel_values)
+
+ outputs = self.encoder(
+ embedding_output,
+ output_hidden_states=True,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ feature_maps = ()
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
+ if stage in self.out_features:
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
+ feature_maps += (hidden_state,)
+
+ if not return_dict:
+ output = (feature_maps,)
+ if output_hidden_states:
+ output += (hidden_states,)
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=hidden_states if output_hidden_states else None,
+ attentions=None,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..b92ac446d94f21c988f431150b663b919e52e975
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnext/modeling_tf_convnext.py
@@ -0,0 +1,667 @@
+# coding=utf-8
+# Copyright 2022 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 ConvNext model."""
+
+
+from __future__ import annotations
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
+from ...modeling_tf_utils import (
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import shape_list
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_convnext import ConvNextConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+_CONFIG_FOR_DOC = "ConvNextConfig"
+_CHECKPOINT_FOR_DOC = "facebook/convnext-tiny-224"
+
+
+class TFConvNextDropPath(keras.layers.Layer):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+ References:
+ (1) https://github.com/rwightman/pytorch-image-models
+ """
+
+ def __init__(self, drop_path: float, **kwargs):
+ super().__init__(**kwargs)
+ self.drop_path = drop_path
+
+ def call(self, x: tf.Tensor, training=None):
+ if training:
+ keep_prob = 1 - self.drop_path
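+ # floor(keep_prob + U[0, 1)) is 1 with probability keep_prob and 0 otherwise;
+ # dividing by keep_prob rescales surviving samples so the expectation is unchanged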
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
+ random_tensor = tf.floor(random_tensor)
+ return (x / keep_prob) * random_tensor
+ return x
+
+
+class TFConvNextEmbeddings(keras.layers.Layer):
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
+ found in src/transformers/models/swin/modeling_swin.py.
+ """
+
+ def __init__(self, config: ConvNextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.patch_embeddings = keras.layers.Conv2D(
+ filters=config.hidden_sizes[0],
+ kernel_size=config.patch_size,
+ strides=config.patch_size,
+ name="patch_embeddings",
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer=keras.initializers.Zeros(),
+ )
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
+ self.num_channels = config.num_channels
+ self.config = config
+
+ def call(self, pixel_values):
+ if isinstance(pixel_values, dict):
+ pixel_values = pixel_values["pixel_values"]
+
+ tf.debugging.assert_equal(
+ shape_list(pixel_values)[1],
+ self.num_channels,
+ message="Make sure that the channel dimension of the pixel values matches the one set in the configuration.",
+ )
+
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
+ # So change the input format from `NCHW` to `NHWC`.
+ # shape = (batch_size, in_height, in_width, in_channels)
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
+
+ embeddings = self.patch_embeddings(pixel_values)
+ embeddings = self.layernorm(embeddings)
+ return embeddings
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "patch_embeddings", None) is not None:
+ with tf.name_scope(self.patch_embeddings.name):
+ self.patch_embeddings.build([None, None, None, self.config.num_channels])
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
+
+
+class TFConvNextLayer(keras.layers.Layer):
+ """This corresponds to the `Block` class in the original implementation.
+
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv], all
+ in (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear], then
+ permute back.
+
+ The authors used (2) as they found it slightly faster in PyTorch. Since the inputs were already permuted to follow
+ NHWC ordering, the operations can be applied directly without any permutation.
+
+ Args:
+ config ([`ConvNextConfig`]): Model configuration class.
+ dim (`int`): Number of input channels.
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
+ """
+
+ def __init__(self, config, dim, drop_path=0.0, **kwargs):
+ super().__init__(**kwargs)
+ self.dim = dim
+ self.config = config
+ self.dwconv = keras.layers.Conv2D(
+ filters=dim,
+ kernel_size=7,
+ padding="same",
+ groups=dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer="zeros",
+ name="dwconv",
+ ) # depthwise conv
+ self.layernorm = keras.layers.LayerNormalization(
+ epsilon=1e-6,
+ name="layernorm",
+ )
+ self.pwconv1 = keras.layers.Dense(
+ units=4 * dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer="zeros",
+ name="pwconv1",
+ ) # pointwise/1x1 convs, implemented with linear layers
+ self.act = get_tf_activation(config.hidden_act)
+ self.pwconv2 = keras.layers.Dense(
+ units=dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer="zeros",
+ name="pwconv2",
+ )
+ # Using `layers.Activation` instead of `tf.identity` to better control `training`
+ # behaviour.
+ self.drop_path = (
+ TFConvNextDropPath(drop_path, name="drop_path")
+ if drop_path > 0.0
+ else keras.layers.Activation("linear", name="drop_path")
+ )
+
+ def build(self, input_shape: tf.TensorShape = None):
+ # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
+ self.layer_scale_parameter = (
+ self.add_weight(
+ shape=(self.dim,),
+ initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value),
+ trainable=True,
+ name="layer_scale_parameter",
+ )
+ if self.config.layer_scale_init_value > 0
+ else None
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dwconv", None) is not None:
+ with tf.name_scope(self.dwconv.name):
+ self.dwconv.build([None, None, None, self.dim])
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, None, self.dim])
+ if getattr(self, "pwconv1", None) is not None:
+ with tf.name_scope(self.pwconv1.name):
+ self.pwconv1.build([None, None, self.dim])
+ if getattr(self, "pwconv2", None) is not None:
+ with tf.name_scope(self.pwconv2.name):
+ self.pwconv2.build([None, None, 4 * self.dim])
+ if getattr(self, "drop_path", None) is not None:
+ with tf.name_scope(self.drop_path.name):
+ self.drop_path.build(None)
+
+ def call(self, hidden_states, training=False):
+ input = hidden_states
+ x = self.dwconv(hidden_states)
+ x = self.layernorm(x)
+ x = self.pwconv1(x)
+ x = self.act(x)
+ x = self.pwconv2(x)
+
+ if self.layer_scale_parameter is not None:
+ x = self.layer_scale_parameter * x
+
+ x = input + self.drop_path(x, training=training)
+ return x
+
+
+class TFConvNextStage(keras.layers.Layer):
+ """ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.
+
+ Args:
+ config ([`ConvNextConfig`]):
+ Model configuration class.
+ in_channels (`int`):
+ Number of input channels.
+ out_channels (`int`):
+ Number of output channels.
+ depth (`int`):
+ Number of residual blocks.
+ drop_path_rates (`List[float]`):
+ Stochastic depth rates for each layer.
+ """
+
+ def __init__(
+ self,
+ config: ConvNextConfig,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: int = 2,
+ stride: int = 2,
+ depth: int = 2,
+ drop_path_rates: Optional[List[float]] = None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ if in_channels != out_channels or stride > 1:
+ self.downsampling_layer = [
+ keras.layers.LayerNormalization(
+ epsilon=1e-6,
+ name="downsampling_layer.0",
+ ),
+ # Inputs to this layer will follow NHWC format since we
+ # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`
+ # layer. All the outputs throughout the model will be in NHWC
+ # from this point on until the output where we again change to
+ # NCHW.
+ keras.layers.Conv2D(
+ filters=out_channels,
+ kernel_size=kernel_size,
+ strides=stride,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer=keras.initializers.Zeros(),
+ name="downsampling_layer.1",
+ ),
+ ]
+ else:
+ self.downsampling_layer = [tf.identity]
+
+ drop_path_rates = drop_path_rates or [0.0] * depth
+ self.layers = [
+ TFConvNextLayer(
+ config,
+ dim=out_channels,
+ drop_path=drop_path_rates[j],
+ name=f"layers.{j}",
+ )
+ for j in range(depth)
+ ]
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.stride = stride
+
+ def call(self, hidden_states):
+ for layer in self.downsampling_layer:
+ hidden_states = layer(hidden_states)
+ for layer in self.layers:
+ hidden_states = layer(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+ if self.in_channels != self.out_channels or self.stride > 1:
+ with tf.name_scope(self.downsampling_layer[0].name):
+ self.downsampling_layer[0].build([None, None, None, self.in_channels])
+ with tf.name_scope(self.downsampling_layer[1].name):
+ self.downsampling_layer[1].build([None, None, None, self.in_channels])
+
+
+class TFConvNextEncoder(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.stages = []
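+ # Stochastic depth decay rule, mirroring the PyTorch encoder: drop rates grow
+ # linearly from 0 to config.drop_path_rate and are split into one list per stage.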
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
+ drop_path_rates = tf.split(drop_path_rates, config.depths)
+ drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
+ prev_chs = config.hidden_sizes[0]
+ for i in range(config.num_stages):
+ out_chs = config.hidden_sizes[i]
+ stage = TFConvNextStage(
+ config,
+ in_channels=prev_chs,
+ out_channels=out_chs,
+ stride=2 if i > 0 else 1,
+ depth=config.depths[i],
+ drop_path_rates=drop_path_rates[i],
+ name=f"stages.{i}",
+ )
+ self.stages.append(stage)
+ prev_chs = out_chs
+
+ def call(self, hidden_states, output_hidden_states=False, return_dict=True):
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, layer_module in enumerate(self.stages):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ hidden_states = layer_module(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
+
+ return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
+
+ def build(self, input_shape=None):
+ for stage in self.stages:
+ with tf.name_scope(stage.name):
+ stage.build(None)
+
+
+@keras_serializable
+class TFConvNextMainLayer(keras.layers.Layer):
+ config_class = ConvNextConfig
+
+ def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.embeddings = TFConvNextEmbeddings(config, name="embeddings")
+ self.encoder = TFConvNextEncoder(config, name="encoder")
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+ # We are setting the `data_format` like so because from here on we will revert to the
+ # NCHW output format
+ self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_first") if add_pooling_layer else None
+
+ @unpack_inputs
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ embedding_output = self.embeddings(pixel_values, training=training)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ # Change to NCHW output format to have uniformity in the modules
+ last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
+ pooled_output = self.layernorm(self.pooler(last_hidden_state))
+
+ # Change the other hidden state outputs to NCHW as well
+ if output_hidden_states:
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
+
+ if not return_dict:
+ hidden_states = hidden_states if output_hidden_states else ()
+ return (last_hidden_state, pooled_output) + hidden_states
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, self.config.hidden_sizes[-1]])
+
+
+class TFConvNextPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ConvNextConfig
+ base_model_prefix = "convnext"
+ main_input_name = "pixel_values"
+
+
+CONVNEXT_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+ Parameters:
+ config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+CONVNEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`ConvNextImageProcessor.__call__`] for details.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+"""
+
+
+@add_start_docstrings(
+ "The bare ConvNext model outputting raw features without any specific head on top.",
+ CONVNEXT_START_DOCSTRING,
+)
+class TFConvNextModel(TFConvNextPreTrainedModel):
+ def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name="convnext")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, TFConvNextModel
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
+ >>> model = TFConvNextModel.from_pretrained("facebook/convnext-tiny-224")
+
+ >>> inputs = image_processor(images=image, return_tensors="tf")
+ >>> outputs = model(**inputs)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ outputs = self.convnext(
+ pixel_values=pixel_values,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return (outputs[0],) + outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=outputs.last_hidden_state,
+ pooler_output=outputs.pooler_output,
+ hidden_states=outputs.hidden_states,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "convnext", None) is not None:
+ with tf.name_scope(self.convnext.name):
+ self.convnext.build(None)
+
+
+@add_start_docstrings(
+ """
+ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+ ImageNet.
+ """,
+ CONVNEXT_START_DOCSTRING,
+)
+class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.convnext = TFConvNextMainLayer(config, name="convnext")
+
+ # Classifier head
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer="zeros",
+ name="classifier",
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, TFConvNextForImageClassification
+ >>> import tensorflow as tf
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
+ >>> model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
+
+ >>> inputs = image_processor(images=image, return_tensors="tf")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+ >>> # model predicts one of the 1000 ImageNet classes
+ >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
+ >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
+ ```"""
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ outputs = self.convnext(
+ pixel_values,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(pooled_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "convnext", None) is not None:
+ with tf.name_scope(self.convnext.name):
+ self.convnext.build(None)
+ if getattr(self, "classifier", None) is not None:
+ if hasattr(self.classifier, "name"):
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..669076080a523bad47fbca778a57dafc42a73d23
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e4b692493685b91060bec5c2381af83a2efcce8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/__pycache__/convert_dit_unilm_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/convert_dit_unilm_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/convert_dit_unilm_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c754b9bbf3eac7b6c5d50aa546383334c5adbf54
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/dit/convert_dit_unilm_to_pytorch.py
@@ -0,0 +1,231 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert DiT checkpoints from the unilm repository."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
+from transformers.image_utils import PILImageResampling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config, has_lm_head=False, is_semantic=False):
+ prefix = "backbone." if is_semantic else ""
+
+ rename_keys = []
+ for i in range(config.num_hidden_layers):
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
+ rename_keys.append(
+ (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
+ )
+ rename_keys.append(
+ (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
+ )
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
+
+ # projection layer + position embeddings
+ rename_keys.extend(
+ [
+ (f"{prefix}cls_token", "beit.embeddings.cls_token"),
+ (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
+ (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
+ (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
+ ]
+ )
+
+ if has_lm_head:
+ # mask token + layernorm
+ rename_keys.extend(
+ [
+ ("mask_token", "beit.embeddings.mask_token"),
+ ("norm.weight", "layernorm.weight"),
+ ("norm.bias", "layernorm.bias"),
+ ]
+ )
+ else:
+ # layernorm + classification head
+ rename_keys.extend(
+ [
+ ("fc_norm.weight", "beit.pooler.layernorm.weight"),
+ ("fc_norm.bias", "beit.pooler.layernorm.bias"),
+ ("head.weight", "classifier.weight"),
+ ("head.bias", "classifier.bias"),
+ ]
+ )
+
+ return rename_keys
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
+ for i in range(config.num_hidden_layers):
+ prefix = "backbone." if is_semantic else ""
+ # queries, keys and values
+ in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
+ q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
+ v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
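+ # note: the HF BEiT attention creates its key projection without a bias, so only
+ # the query and value biases are read in from the original checkpoint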
+
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+ : config.hidden_size, :
+ ]
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+ config.hidden_size : config.hidden_size * 2, :
+ ]
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+ -config.hidden_size :, :
+ ]
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
+
+ # gamma_1 and gamma_2
+ # we call them lambda because otherwise they are renamed when using .from_pretrained
+ gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
+ gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
+
+ state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
+ state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+@torch.no_grad()
+def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
+ """
+ Copy/paste/tweak model's weights to our BEiT structure.
+ """
+
+ # define default BEiT configuration
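+ # checkpoints fine-tuned on RVL-CDIP carry a document-classification head; all
+ # other DiT checkpoints keep the masked-image-modeling head (has_lm_head)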
+ has_lm_head = "rvlcdip" not in checkpoint_url
+ config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
+
+ # size of the architecture
+ if "large" in checkpoint_url or "dit-l" in checkpoint_url:
+ config.hidden_size = 1024
+ config.intermediate_size = 4096
+ config.num_hidden_layers = 24
+ config.num_attention_heads = 16
+
+ # labels
+ if "rvlcdip" in checkpoint_url:
+ config.num_labels = 16
+ repo_id = "huggingface/label-files"
+ filename = "rvlcdip-id2label.json"
+ with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
+ id2label = json.load(f)
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ # load state_dict of original model, remove and rename some keys
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
+
+ rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
+
+ # load HuggingFace model
+ model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
+ model.eval()
+ model.load_state_dict(state_dict)
+
+ # Check outputs on an image
+ image_processor = BeitImageProcessor(
+ size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
+ )
+ image = prepare_img()
+
+ encoding = image_processor(images=image, return_tensors="pt")
+ pixel_values = encoding["pixel_values"]
+
+ outputs = model(pixel_values)
+ logits = outputs.logits
+
+ # verify logits
+ expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
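+ # [1, 16]: logits over the 16 RVL-CDIP classes; [1, 196, 8192]: one distribution
+ # over the 8192 visual tokens for each of the (224 / 16) ** 2 = 196 patches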
+ assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ if has_lm_head:
+ model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
+ else:
+ model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
+ image_processor.push_to_hub(
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+ organization="nielsr",
+ commit_message="Add image processor",
+ use_temp_dir=True,
+ )
+ model.push_to_hub(
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+ organization="nielsr",
+ commit_message="Add model",
+ use_temp_dir=True,
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
+ type=str,
+ help="URL to the original PyTorch checkpoint (.pth file).",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub",
+ action="store_true",
+ )
+ args = parser.parse_args()
+ convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..94788dcb85e76faa2f312df8d13f5577c21a88d1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__init__.py
@@ -0,0 +1,75 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
+ _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_glpn"] = [
+ "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "GLPNForDepthEstimation",
+ "GLPNLayer",
+ "GLPNModel",
+ "GLPNPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_glpn import GLPNFeatureExtractor
+ from .image_processing_glpn import GLPNImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_glpn import (
+ GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
+ GLPNForDepthEstimation,
+ GLPNLayer,
+ GLPNModel,
+ GLPNPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d002a7f396b693225ca43efff9bf48996ab385e8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cbd0cdf0278c11dfaf7279085b0428813fcf949e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/configuration_glpn.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..373b782408e6dc0a94915a83f58641b9bb821789
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/convert_glpn_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b04dfe724d88b218e80deff1c2c41637e388462
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/feature_extraction_glpn.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a0d657299db4238c2e921dc7d113724a5f2e0fa
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/image_processing_glpn.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe1a1c425fe34d75ef43e82136dae43950cc664f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/__pycache__/modeling_glpn.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..5408ee94a8ade421072b97c05954fcf7a8248914
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/configuration_glpn.py
@@ -0,0 +1,137 @@
+# coding=utf-8
+# Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" GLPN model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
+ # See all GLPN models at https://huggingface.co/models?filter=glpn
+}
+
+
+class GLPNConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate a GLPN
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the GLPN
+ [vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ num_encoder_blocks (`int`, *optional*, defaults to 4):
+ The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
+ depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
+ The number of layers in each encoder block.
+ sr_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
+ Sequence reduction ratios in each encoder block.
+ hidden_sizes (`List[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
+ Dimension of each of the encoder blocks.
+ patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
+ Patch size before each encoder block.
+ strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
+ Stride before each encoder block.
+ num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
+ mlp_ratios (`List[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
+ encoder blocks.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
+ The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ decoder_hidden_size (`int`, *optional*, defaults to 64):
+ The dimension of the decoder.
+ max_depth (`int`, *optional*, defaults to 10):
+ The maximum depth of the decoder.
+ head_in_index (`int`, *optional*, defaults to -1):
+ The index of the features to use in the head.
+
+ Example:
+
+ ```python
+ >>> from transformers import GLPNModel, GLPNConfig
+
+ >>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
+ >>> configuration = GLPNConfig()
+
+ >>> # Initializing a model from the vinvino02/glpn-kitti style configuration
+ >>> model = GLPNModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "glpn"
+
+ def __init__(
+ self,
+ num_channels=3,
+ num_encoder_blocks=4,
+ depths=[2, 2, 2, 2],
+ sr_ratios=[8, 4, 2, 1],
+ hidden_sizes=[32, 64, 160, 256],
+ patch_sizes=[7, 3, 3, 3],
+ strides=[4, 2, 2, 2],
+ num_attention_heads=[1, 2, 5, 8],
+ mlp_ratios=[4, 4, 4, 4],
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ drop_path_rate=0.1,
+ layer_norm_eps=1e-6,
+ decoder_hidden_size=64,
+ max_depth=10,
+ head_in_index=-1,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.num_channels = num_channels
+ self.num_encoder_blocks = num_encoder_blocks
+ self.depths = depths
+ self.sr_ratios = sr_ratios
+ self.hidden_sizes = hidden_sizes
+ self.patch_sizes = patch_sizes
+ self.strides = strides
+ self.mlp_ratios = mlp_ratios
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.drop_path_rate = drop_path_rate
+ self.layer_norm_eps = layer_norm_eps
+ self.decoder_hidden_size = decoder_hidden_size
+ self.max_depth = max_depth
+ self.head_in_index = head_in_index
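+
+
+# Illustrative sketch: the released GLPN checkpoints use a larger, SegFormer-B4-sized encoder,
+# which can be expressed purely through the arguments above, e.g.:
+#
+#     config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 8, 27, 3])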
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f0183783ec812f69766d9220efb58652a21cb87
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/convert_glpn_to_pytorch.py
@@ -0,0 +1,219 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert GLPN checkpoints."""
+
+
+import argparse
+from collections import OrderedDict
+from pathlib import Path
+
+import requests
+import torch
+from PIL import Image
+
+from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def rename_keys(state_dict):
+ new_state_dict = OrderedDict()
+ for key, value in state_dict.items():
+ if key.startswith("module.encoder"):
+ key = key.replace("module.encoder", "glpn.encoder")
+ if key.startswith("module.decoder"):
+ key = key.replace("module.decoder", "decoder.stages")
+ if "patch_embed" in key:
+ # replace for example patch_embed1 by patch_embeddings.0
+ idx = key[key.find("patch_embed") + len("patch_embed")]
+ key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
+ if "norm" in key:
+ key = key.replace("norm", "layer_norm")
+ if "glpn.encoder.layer_norm" in key:
+ # replace for example layer_norm1 by layer_norm.0
+ idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
+ key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
+ if "layer_norm1" in key:
+ key = key.replace("layer_norm1", "layer_norm_1")
+ if "layer_norm2" in key:
+ key = key.replace("layer_norm2", "layer_norm_2")
+ if "block" in key:
+ # replace for example block1 by block.0
+ idx = key[key.find("block") + len("block")]
+ key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
+ if "attn.q" in key:
+ key = key.replace("attn.q", "attention.self.query")
+ if "attn.proj" in key:
+ key = key.replace("attn.proj", "attention.output.dense")
+ if "attn" in key:
+ key = key.replace("attn", "attention.self")
+ if "fc1" in key:
+ key = key.replace("fc1", "dense1")
+ if "fc2" in key:
+ key = key.replace("fc2", "dense2")
+ if "linear_pred" in key:
+ key = key.replace("linear_pred", "classifier")
+ if "linear_fuse" in key:
+ key = key.replace("linear_fuse.conv", "linear_fuse")
+ key = key.replace("linear_fuse.bn", "batch_norm")
+ if "linear_c" in key:
+ # replace for example linear_c4 by linear_c.3
+ idx = key[key.find("linear_c") + len("linear_c")]
+ key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
+ if "bot_conv" in key:
+ key = key.replace("bot_conv", "0.convolution")
+ if "skip_conv1" in key:
+ key = key.replace("skip_conv1", "1.convolution")
+ if "skip_conv2" in key:
+ key = key.replace("skip_conv2", "2.convolution")
+ if "fusion1" in key:
+ key = key.replace("fusion1", "1.fusion")
+ if "fusion2" in key:
+ key = key.replace("fusion2", "2.fusion")
+ if "fusion3" in key:
+ key = key.replace("fusion3", "3.fusion")
+ if "fusion" in key and "conv" in key:
+ key = key.replace("conv", "convolutional_layer")
+ if key.startswith("module.last_layer_depth"):
+ key = key.replace("module.last_layer_depth", "head.head")
+ new_state_dict[key] = value
+
+ return new_state_dict
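+
+# Worked example (illustrative): the original key "module.encoder.block1.0.attn.q.weight"
+# passes through the replacements above as follows:
+#     module.encoder.block1.0.attn.q.weight
+#     -> glpn.encoder.block1.0.attn.q.weight (module.encoder -> glpn.encoder)
+#     -> glpn.encoder.block.0.0.attn.q.weight (block1 -> block.0)
+#     -> glpn.encoder.block.0.0.attention.self.query.weight (attn.q -> attention.self.query)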
+
+
+def read_in_k_v(state_dict, config):
+ # for each of the encoder blocks:
+ for i in range(config.num_encoder_blocks):
+ for j in range(config.depths[i]):
+ # read in weights + bias of keys and values (which is a single matrix in the original implementation)
+ kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
+ kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
+ # next, add keys and values (in that order) to the state dict
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
+ : config.hidden_sizes[i], :
+ ]
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
+ config.hidden_sizes[i] :, :
+ ]
+ state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
+
+
+# We will verify our results on a COCO image
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image = Image.open(requests.get(url, stream=True).raw)
+
+ return image
+
+
+@torch.no_grad()
+def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
+ """
+ Copy/paste/tweak model's weights to our GLPN structure.
+ """
+
+ # load GLPN configuration (Segformer-B4 size)
+ config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
+
+ # load image processor (only resize + rescale)
+ image_processor = GLPNImageProcessor()
+
+ # prepare image
+ image = prepare_img()
+ pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
+
+ logger.info("Converting model...")
+
+ # load original state dict
+ state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
+
+ # rename keys
+ state_dict = rename_keys(state_dict)
+
+ # key and value matrices need special treatment
+ read_in_k_v(state_dict, config)
+
+ # create HuggingFace model and load state dict
+ model = GLPNForDepthEstimation(config)
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ # forward pass
+ outputs = model(pixel_values)
+ predicted_depth = outputs.predicted_depth
+
+ # verify output
+ if model_name is not None:
+ if "nyu" in model_name:
+ expected_slice = torch.tensor(
+ [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
+ )
+ elif "kitti" in model_name:
+ expected_slice = torch.tensor(
+ [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
+ )
+ else:
+ raise ValueError(f"Unknown model name: {model_name}")
+
+ expected_shape = torch.Size([1, 480, 640])
+
+ assert predicted_depth.shape == expected_shape
+ assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
+ print("Looks ok!")
+
+ # finally, push to hub if required
+ if push_to_hub:
+ logger.info("Pushing model and image processor to the hub...")
+ model.push_to_hub(
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+ organization="nielsr",
+ commit_message="Add model",
+ use_temp_dir=True,
+ )
+ image_processor.push_to_hub(
+ repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
+ organization="nielsr",
+ commit_message="Add image processor",
+ use_temp_dir=True,
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--checkpoint_path",
+ default=None,
+ type=str,
+ help="Path to the original PyTorch checkpoint (.pth file).",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
+ )
+ parser.add_argument(
+ "--model_name",
+ default="glpn-kitti",
+ type=str,
+ help="Name of the model in case you're pushing to the hub.",
+ )
+ args = parser.parse_args()
+ convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
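+
+# Example invocation (illustrative; the paths below are placeholders):
+#
+#     python convert_glpn_to_pytorch.py \
+#         --checkpoint_path ./glpn_kitti.pth \
+#         --pytorch_dump_folder_path ./glpn-kitti \
+#         --model_name glpn-kitti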
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..314268225d2af41f3cc6af55af4e21aebe087b60
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/feature_extraction_glpn.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for GLPN."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_glpn import GLPNImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class GLPNFeatureExtractor(GLPNImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+ " use GLPNImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..7577b4eeb3d0c20b9d023bc488f8bf3c6bb39fdd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/image_processing_glpn.py
@@ -0,0 +1,233 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for GLPN."""
+
+from typing import List, Optional, Union
+
+import numpy as np
+import PIL.Image
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature
+from ...image_transforms import resize, to_channel_dimension_format
+from ...image_utils import (
+ ChannelDimension,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class GLPNImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a GLPN image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions, rounding them down to the closest multiple of
+ `size_divisor`. Can be overridden by `do_resize` in `preprocess`.
+ size_divisor (`int`, *optional*, defaults to 32):
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
+ multiple of `size_divisor`. Can be overridden by `size_divisor` in `preprocess`.
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Can be
+ overridden by `do_rescale` in `preprocess`.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size_divisor: int = 32,
+ resample=PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ **kwargs,
+ ) -> None:
+ self.do_resize = do_resize
+ self.do_rescale = do_rescale
+ self.size_divisor = size_divisor
+ self.resample = resample
+ super().__init__(**kwargs)
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size_divisor",
+ "resample",
+ "do_rescale",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size_divisor: int,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize the image, rounding the (height, width) dimensions down to the closest multiple of size_divisor.
+
+ If the image is of dimension (3, 260, 170) and size_divisor is 32, the image will be resized to (3, 256, 160).
+
+ Args:
+ image (`np.ndarray`):
+ The image to resize.
+ size_divisor (`int`):
+ The image is resized so its height and width are rounded down to the closest multiple of
+ `size_divisor`.
+ resample:
+ `PIL.Image` resampling filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If `None`, the channel dimension format of the input
+ image is used. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not set, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ height, width = get_image_size(image, channel_dim=input_data_format)
+ # Rounds the height and width down to the closest multiple of size_divisor
+ new_h = height // size_divisor * size_divisor
+ new_w = width // size_divisor * size_divisor
+ image = resize(
+ image,
+ (new_h, new_w),
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+ return image
+
+ def preprocess(
+ self,
+ images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
+ do_resize: Optional[bool] = None,
+ size_divisor: Optional[int] = None,
+ resample=None,
+ do_rescale: Optional[bool] = None,
+ return_tensors: Optional[Union[TensorType, str]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess the given images.
+
+ Args:
+ images (`PIL.Image.Image` or `TensorType` or `List[np.ndarray]` or `List[TensorType]`):
+ Images to preprocess. Expects a single image or a batch of images with pixel values ranging from 0 to
+ 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the input such that the (height, width) dimensions are a multiple of `size_divisor`.
+ size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
+ When `do_resize` is `True`, images are resized so their height and width are rounded down to the
+ closest multiple of `size_divisor`.
+ resample (`PIL.Image` resampling filter, *optional*, defaults to `self.resample`):
+ `PIL.Image` resampling filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
+ an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - `None`: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ size_divisor = size_divisor if size_divisor is not None else self.size_divisor
+ resample = resample if resample is not None else self.resample
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ # Here, the rescale() method uses a constant rescale_factor (1 / 255), so it does not
+ # need to be validated against a user-provided rescale_factor.
+ validate_preprocess_arguments(
+ do_resize=do_resize,
+ size=size_divisor, # size_divisor stands in for size when validating the resize arguments.
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(img) for img in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [self.rescale(image, scale=1 / 255, input_data_format=input_data_format) for image in images]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
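+
+# Minimal usage sketch (illustrative; assumes a PIL image `image` is in scope):
+#
+#     processor = GLPNImageProcessor()
+#     inputs = processor(images=image, return_tensors="pt")
+#     # inputs["pixel_values"] has shape (1, 3, H', W'), with H' and W' the input height and
+#     # width rounded down to multiples of 32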
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2ddef5c41e1e519ecb14ea9bea468ca07c7929d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/glpn/modeling_glpn.py
@@ -0,0 +1,780 @@
+# coding=utf-8
+# Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch GLPN model."""
+
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_glpn import GLPNConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+# General docstring
+_CONFIG_FOR_DOC = "GLPNConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "vinvino02/glpn-kitti"
+_EXPECTED_OUTPUT_SHAPE = [1, 512, 15, 20]
+
+GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "vinvino02/glpn-kitti",
+ # See all GLPN models at https://huggingface.co/models?filter=glpn
+]
+
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
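+
+# Illustrative numbers: with drop_prob=0.1 at training time, each sample's residual branch is
+# zeroed with probability 0.1 and scaled by 1 / 0.9 otherwise, so the expected value of the
+# output equals the input.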
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerDropPath
+class GLPNDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerOverlapPatchEmbeddings
+class GLPNOverlapPatchEmbeddings(nn.Module):
+ """Construct the overlapping patch embeddings."""
+
+ def __init__(self, patch_size, stride, num_channels, hidden_size):
+ super().__init__()
+ self.proj = nn.Conv2d(
+ num_channels,
+ hidden_size,
+ kernel_size=patch_size,
+ stride=stride,
+ padding=patch_size // 2,
+ )
+
+ self.layer_norm = nn.LayerNorm(hidden_size)
+
+ def forward(self, pixel_values):
+ embeddings = self.proj(pixel_values)
+ _, _, height, width = embeddings.shape
+ # (batch_size, num_channels, height, width) -> (batch_size, num_channels, height*width) -> (batch_size, height*width, num_channels)
+ # this can be fed to a Transformer layer
+ embeddings = embeddings.flatten(2).transpose(1, 2)
+ embeddings = self.layer_norm(embeddings)
+ return embeddings, height, width
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerEfficientSelfAttention
+class GLPNEfficientSelfAttention(nn.Module):
+ """SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT
+ paper](https://arxiv.org/abs/2102.12122)."""
+
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
+ super().__init__()
+ self.hidden_size = hidden_size
+ self.num_attention_heads = num_attention_heads
+
+ if self.hidden_size % self.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({self.num_attention_heads})"
+ )
+
+ self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(self.hidden_size, self.all_head_size)
+ self.key = nn.Linear(self.hidden_size, self.all_head_size)
+ self.value = nn.Linear(self.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ self.sr_ratio = sequence_reduction_ratio
+ if sequence_reduction_ratio > 1:
+ self.sr = nn.Conv2d(
+ hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio
+ )
+ self.layer_norm = nn.LayerNorm(hidden_size)
+
+ def transpose_for_scores(self, hidden_states):
+ new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ hidden_states = hidden_states.view(new_shape)
+ return hidden_states.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states,
+ height,
+ width,
+ output_attentions=False,
+ ):
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
+
+ if self.sr_ratio > 1:
+ batch_size, seq_len, num_channels = hidden_states.shape
+ # Reshape to (batch_size, num_channels, height, width)
+ hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
+ # Apply sequence reduction
+ hidden_states = self.sr(hidden_states)
+ # Reshape back to (batch_size, seq_len, num_channels)
+ hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
+ hidden_states = self.layer_norm(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
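+
+# Note: with sequence reduction ratio R, keys and values are computed on a sequence of length
+# (height * width) / R**2, so the attention cost drops from O(N^2) to O(N^2 / R**2) for
+# N = height * width.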
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerSelfOutput
+class GLPNSelfOutput(nn.Module):
+ def __init__(self, config, hidden_size):
+ super().__init__()
+ self.dense = nn.Linear(hidden_size, hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerAttention with Segformer->GLPN
+class GLPNAttention(nn.Module):
+ def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
+ super().__init__()
+ self.self = GLPNEfficientSelfAttention(
+ config=config,
+ hidden_size=hidden_size,
+ num_attention_heads=num_attention_heads,
+ sequence_reduction_ratio=sequence_reduction_ratio,
+ )
+ self.output = GLPNSelfOutput(config, hidden_size=hidden_size)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(self, hidden_states, height, width, output_attentions=False):
+ self_outputs = self.self(hidden_states, height, width, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerDWConv
+class GLPNDWConv(nn.Module):
+ def __init__(self, dim=768):
+ super().__init__()
+ self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
+
+ def forward(self, hidden_states, height, width):
+ batch_size, seq_len, num_channels = hidden_states.shape
+ hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
+ hidden_states = self.dwconv(hidden_states)
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
+
+ return hidden_states
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerMixFFN with Segformer->GLPN
+class GLPNMixFFN(nn.Module):
+ def __init__(self, config, in_features, hidden_features=None, out_features=None):
+ super().__init__()
+ out_features = out_features or in_features
+ self.dense1 = nn.Linear(in_features, hidden_features)
+ self.dwconv = GLPNDWConv(hidden_features)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.dense2 = nn.Linear(hidden_features, out_features)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, height, width):
+ hidden_states = self.dense1(hidden_states)
+ hidden_states = self.dwconv(hidden_states, height, width)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense2(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.segformer.modeling_segformer.SegformerLayer with Segformer->GLPN
+class GLPNLayer(nn.Module):
+ """This corresponds to the Block class in the original implementation."""
+
+ def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
+ super().__init__()
+ self.layer_norm_1 = nn.LayerNorm(hidden_size)
+ self.attention = GLPNAttention(
+ config,
+ hidden_size=hidden_size,
+ num_attention_heads=num_attention_heads,
+ sequence_reduction_ratio=sequence_reduction_ratio,
+ )
+ self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+ self.layer_norm_2 = nn.LayerNorm(hidden_size)
+ mlp_hidden_size = int(hidden_size * mlp_ratio)
+ self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
+
+ def forward(self, hidden_states, height, width, output_attentions=False):
+ self_attention_outputs = self.attention(
+ self.layer_norm_1(hidden_states), # in GLPN, layernorm is applied before self-attention
+ height,
+ width,
+ output_attentions=output_attentions,
+ )
+
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection (with stochastic depth)
+ attention_output = self.drop_path(attention_output)
+ hidden_states = attention_output + hidden_states
+
+ mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
+
+ # second residual connection (with stochastic depth)
+ mlp_output = self.drop_path(mlp_output)
+ layer_output = mlp_output + hidden_states
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+class GLPNEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ # stochastic depth decay rule
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
+
+ # patch embeddings
+ embeddings = []
+ for i in range(config.num_encoder_blocks):
+ embeddings.append(
+ GLPNOverlapPatchEmbeddings(
+ patch_size=config.patch_sizes[i],
+ stride=config.strides[i],
+ num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
+ hidden_size=config.hidden_sizes[i],
+ )
+ )
+ self.patch_embeddings = nn.ModuleList(embeddings)
+
+ # Transformer blocks
+ blocks = []
+ cur = 0
+ for i in range(config.num_encoder_blocks):
+ # each block consists of layers
+ layers = []
+ if i != 0:
+ cur += config.depths[i - 1]
+ for j in range(config.depths[i]):
+ layers.append(
+ GLPNLayer(
+ config,
+ hidden_size=config.hidden_sizes[i],
+ num_attention_heads=config.num_attention_heads[i],
+ drop_path=dpr[cur + j],
+ sequence_reduction_ratio=config.sr_ratios[i],
+ mlp_ratio=config.mlp_ratios[i],
+ )
+ )
+ blocks.append(nn.ModuleList(layers))
+
+ self.block = nn.ModuleList(blocks)
+
+ # Layer norms
+ self.layer_norm = nn.ModuleList(
+ [nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)]
+ )
+
+ def forward(
+ self,
+ pixel_values,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ batch_size = pixel_values.shape[0]
+
+ hidden_states = pixel_values
+ for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
+ embedding_layer, block_layer, norm_layer = x
+ # first, obtain patch embeddings
+ hidden_states, height, width = embedding_layer(hidden_states)
+ # second, send embeddings through blocks
+ for i, blk in enumerate(block_layer):
+ layer_outputs = blk(hidden_states, height, width, output_attentions)
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ # third, apply layer norm
+ hidden_states = norm_layer(hidden_states)
+ # fourth, reshape back to (batch_size, num_channels, height, width)
+ hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class GLPNPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GLPNConfig
+ base_model_prefix = "glpn"
+ main_input_name = "pixel_values"
+
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerPreTrainedModel._init_weights
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+GLPN_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`GLPNConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GLPN_INPUTS_DOCSTRING = r"""
+
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`GLPNImageProcessor.__call__`] for details.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.",
+ GLPN_START_DOCSTRING,
+)
+class GLPNModel(GLPNPreTrainedModel):
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.__init__ with Segformer->GLPN
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ # hierarchical Transformer encoder
+ self.encoder = GLPNEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. `heads_to_prune` is a dict of {layer_num: list of heads to prune in this
+ layer}. See the base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ # Copied from transformers.models.segformer.modeling_segformer.SegformerModel.forward
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_outputs = self.encoder(
+ pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class GLPNSelectiveFeatureFusion(nn.Module):
+ """
+ Selective Feature Fusion module, as explained in the [paper](https://arxiv.org/abs/2201.07436) (section 3.4). This
+ module adaptively selects and integrates local and global features by computing an attention map for each feature.
+ """
+
+ def __init__(self, in_channel=64):
+ super().__init__()
+
+ self.convolutional_layer1 = nn.Sequential(
+ nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(in_channel),
+ nn.ReLU(),
+ )
+
+ self.convolutional_layer2 = nn.Sequential(
+ nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(int(in_channel / 2)),
+ nn.ReLU(),
+ )
+
+ self.convolutional_layer3 = nn.Conv2d(
+ in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1
+ )
+
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, local_features, global_features):
+ # concatenate features along the channel dimension
+ features = torch.cat((local_features, global_features), dim=1)
+ # pass through convolutional layers
+ features = self.convolutional_layer1(features)
+ features = self.convolutional_layer2(features)
+ features = self.convolutional_layer3(features)
+ # apply sigmoid to get two-channel attention map
+ attn = self.sigmoid(features)
+ # construct hybrid features by weighting local and global features with their attention maps and summing element-wise
+ hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[
+ :, 1, :, :
+ ].unsqueeze(1)
+
+ return hybrid_features
+
+
+class GLPNDecoderStage(nn.Module):
+ def __init__(self, in_channels, out_channels):
+ super().__init__()
+ should_skip = in_channels == out_channels
+ self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1) if not should_skip else nn.Identity()
+ self.fusion = GLPNSelectiveFeatureFusion(out_channels)
+ self.upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
+
+ def forward(self, hidden_state, residual=None):
+ hidden_state = self.convolution(hidden_state)
+ if residual is not None:
+ hidden_state = self.fusion(hidden_state, residual)
+ hidden_state = self.upsample(hidden_state)
+
+ return hidden_state
+
+
+class GLPNDecoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ # we use features from end -> start
+ reversed_hidden_sizes = config.hidden_sizes[::-1]
+ out_channels = config.decoder_hidden_size
+
+ self.stages = nn.ModuleList(
+ [GLPNDecoderStage(hidden_size, out_channels) for hidden_size in reversed_hidden_sizes]
+ )
+ # don't fuse in first stage
+ self.stages[0].fusion = None
+
+ self.final_upsample = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
+
+ def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]:
+ stage_hidden_states = []
+ stage_hidden_state = None
+ for hidden_state, stage in zip(hidden_states[::-1], self.stages):
+ stage_hidden_state = stage(hidden_state, stage_hidden_state)
+ stage_hidden_states.append(stage_hidden_state)
+
+ stage_hidden_states[-1] = self.final_upsample(stage_hidden_state)
+
+ return stage_hidden_states
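+
+# The decoder consumes the encoder features from deepest to shallowest: each stage projects its
+# input to decoder_hidden_size, fuses it with the upsampled output of the previous stage
+# (skipped for the first stage), and upsamples by a factor of 2; a final 2x upsample is applied
+# to the last stage's output.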
+
+
+class SiLogLoss(nn.Module):
+ r"""
+ Implements the scale-invariant log-scale loss of [Eigen et al., 2014](https://arxiv.org/abs/1406.2283).
+
+ $$L=\sqrt{\frac{1}{n} \sum_{i} d_{i}^{2}-\frac{\lambda}{n^{2}}\left(\sum_{i} d_{i}\right)^{2}}$$ where
+ $d_{i}=\log y_{i}-\log y_{i}^{*}$ and $\lambda$ is the `lambd` balancing factor (0.5 by default).
+
+ """
+
+ def __init__(self, lambd=0.5):
+ super().__init__()
+ self.lambd = lambd
+
+ def forward(self, pred, target):
+ valid_mask = (target > 0).detach()
+ diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
+ loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2))
+
+ return loss
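+
+# Minimal usage sketch (illustrative): pred and target are positive depth maps of the same
+# shape; pixels with target <= 0 are excluded by the valid mask.
+#
+#     criterion = SiLogLoss()
+#     loss = criterion(pred=torch.rand(2, 48, 64) + 0.1, target=torch.rand(2, 48, 64))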
+
+
+class GLPNDepthEstimationHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ self.config = config
+
+ channels = config.decoder_hidden_size
+ self.head = nn.Sequential(
+ nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(inplace=False),
+ nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1),
+ )
+
+ def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor:
+ # use last features of the decoder
+ hidden_states = hidden_states[self.config.head_in_index]
+
+ hidden_states = self.head(hidden_states)
+
+ predicted_depth = torch.sigmoid(hidden_states) * self.config.max_depth
+ predicted_depth = predicted_depth.squeeze(dim=1)
+
+ return predicted_depth
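+
+# Note: the sigmoid bounds the raw head output to (0, 1), so predicted depth values always lie
+# in (0, config.max_depth).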
+
+
+@add_start_docstrings(
+ """GLPN Model transformer with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.""",
+ GLPN_START_DOCSTRING,
+)
+class GLPNForDepthEstimation(GLPNPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.glpn = GLPNModel(config)
+ self.decoder = GLPNDecoder(config)
+ self.head = GLPNDepthEstimationHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ labels: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:
+ r"""
+ labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
+ Ground truth depth estimation maps for computing the loss.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, GLPNForDepthEstimation
+ >>> import torch
+ >>> import numpy as np
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti")
+ >>> model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
+
+ >>> # prepare image for the model
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**inputs)
+ ... predicted_depth = outputs.predicted_depth
+
+ >>> # interpolate to original size
+ >>> prediction = torch.nn.functional.interpolate(
+ ... predicted_depth.unsqueeze(1),
+ ... size=image.size[::-1],
+ ... mode="bicubic",
+ ... align_corners=False,
+ ... )
+
+ >>> # visualize the prediction
+ >>> output = prediction.squeeze().cpu().numpy()
+ >>> formatted = (output * 255 / np.max(output)).astype("uint8")
+ >>> depth = Image.fromarray(formatted)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ outputs = self.glpn(
+ pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=True, # we need the intermediate hidden states
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ out = self.decoder(hidden_states)
+ predicted_depth = self.head(out)
+
+ loss = None
+ if labels is not None:
+ loss_fct = SiLogLoss()
+ loss = loss_fct(predicted_depth, labels)
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (predicted_depth,) + outputs[1:]
+ else:
+ output = (predicted_depth,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return DepthEstimatorOutput(
+ loss=loss,
+ predicted_depth=predicted_depth,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=outputs.attentions,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b45c6b6ddbff905e9317ad481658c35bac68fcb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24047ee038221e383052e65847e406dfc30a37ef
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c97b4cf7aa54a229ffb4462e5bada0c58c14b2d4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4440dbdc7a4aa45590be5f482d8dce30169b728a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bae580f1f6e8c24b15bec564cf0249b51640cb9b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..875625bfb4815f99185cf41c7e7a08c9eea31f2a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34a5f602908c416e54d19639d184d8c5183bd151
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ced7d2acadf4e048ed18482d960ab5be0da0126
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py
@@ -0,0 +1,171 @@
+# coding=utf-8
+# Copyright 2018, Hao Tan, Mohit Bansal
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" LXMERT model configuration"""
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
+}
+
+
+class LxmertConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is used
+ to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Instantiating
+ a configuration with the defaults will yield a similar configuration to that of the Lxmert
+ [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_qa_labels (`int`, *optional*, defaults to 9500):
+ The total number of different question answering (QA) labels. When using more than one QA dataset,
+ this should account for the total number of labels across all of those datasets.
+ num_object_labels (`int`, *optional*, defaults to 1600):
+ The total number of semantically unique objects that lxmert will be able to classify a pooled-object
+ feature as belonging to.
+ num_attr_labels (`int`, *optional*, defaults to 400):
+ The total number of semantically unique attributes that lxmert will be able to classify a pooled-object
+ feature as possessing.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the *token_type_ids* passed when calling [`LxmertModel`] or [`TFLxmertModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ l_layers (`int`, *optional*, defaults to 9):
+ Number of hidden layers in the Transformer language encoder.
+ x_layers (`int`, *optional*, defaults to 5):
+ Number of hidden layers in the Transformer cross modality encoder.
+ r_layers (`int`, *optional*, defaults to 5):
+ Number of hidden layers in the Transformer visual encoder.
+ visual_feat_dim (`int`, *optional*, defaults to 2048):
+ This represents the last dimension of the pooled-object features used as input for the model, i.e. the
+ size of each object feature itself.
+ visual_pos_dim (`int`, *optional*, defaults to 4):
+ This represents the number of spatial features that are mixed into the visual features. The default is set
+ to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width, height).
+ visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
+ This represents the scaling factor by which each visual loss is multiplied if, during pretraining, one
+ decides to train with multiple vision-based loss objectives.
+ task_matched (`bool`, *optional*, defaults to `True`):
+ This task is used for sentence-image matching. If the sentence correctly describes the image the label will
+ be 1. If the sentence does not correctly describe the image, the label will be 0.
+ task_mask_lm (`bool`, *optional*, defaults to `True`):
+ Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
+ objective.
+ task_obj_predict (`bool`, *optional*, defaults to `True`):
+ Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.
+ task_qa (`bool`, *optional*, defaults to `True`):
+ Whether or not to add the question-answering loss to the objective.
+ visual_obj_loss (`bool`, *optional*, defaults to `True`):
+ Whether or not to calculate the object-prediction loss objective.
+ visual_attr_loss (`bool`, *optional*, defaults to `True`):
+ Whether or not to calculate the attribute-prediction loss objective.
+ visual_feat_loss (`bool`, *optional*, defaults to `True`):
+ Whether or not to calculate the feature-regression loss objective.
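+
+ Example (a minimal usage sketch; instantiating [`LxmertModel`] requires PyTorch):
+
+ ```python
+ >>> from transformers import LxmertConfig, LxmertModel
+
+ >>> # Initializing a default (unc-nlp/lxmert-base-uncased style) configuration
+ >>> configuration = LxmertConfig()
+
+ >>> # Initializing a model from the configuration
+ >>> model = LxmertModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```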
+ """
+
+ model_type = "lxmert"
+ attribute_map = {}
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_attention_heads=12,
+ num_qa_labels=9500,
+ num_object_labels=1600,
+ num_attr_labels=400,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ l_layers=9,
+ x_layers=5,
+ r_layers=5,
+ visual_feat_dim=2048,
+ visual_pos_dim=4,
+ visual_loss_normalizer=6.67,
+ task_matched=True,
+ task_mask_lm=True,
+ task_obj_predict=True,
+ task_qa=True,
+ visual_obj_loss=True,
+ visual_attr_loss=True,
+ visual_feat_loss=True,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.num_qa_labels = num_qa_labels
+ self.num_object_labels = num_object_labels
+ self.num_attr_labels = num_attr_labels
+ self.l_layers = l_layers
+ self.x_layers = x_layers
+ self.r_layers = r_layers
+ self.visual_feat_dim = visual_feat_dim
+ self.visual_pos_dim = visual_pos_dim
+ self.visual_loss_normalizer = visual_loss_normalizer
+ self.task_matched = task_matched
+ self.task_mask_lm = task_mask_lm
+ self.task_obj_predict = task_obj_predict
+ self.task_qa = task_qa
+ self.visual_obj_loss = visual_obj_loss
+ self.visual_attr_loss = visual_attr_loss
+ self.visual_feat_loss = visual_feat_loss
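+ # Unlike most configs, `num_hidden_layers` here is a dict holding the three
+ # LXMERT encoder depths: vision (r_layers), cross-modality (x_layers) and
+ # language (l_layers).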
+ self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
+ super().__init__(**kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8eb86f1d1e48a1459154b647fb2f4178df338b0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,60 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert LXMERT checkpoint."""
+
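+# Example invocation (illustrative; the paths are hypothetical):
+#
+#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
+#       --tf_checkpoint_path /path/to/model.ckpt \
+#       --config_file /path/to/config.json \
+#       --pytorch_dump_path /path/to/pytorch_model.bin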
+
+import argparse
+
+import torch
+
+from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
+ # Initialise PyTorch model
+ config = LxmertConfig.from_json_file(config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ model = LxmertForPreTraining(config)
+
+ # Load weights from tf checkpoint
+ load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
+
+ # Save pytorch-model
+ print(f"Save PyTorch model to {pytorch_dump_path}")
+ torch.save(model.state_dict(), pytorch_dump_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
+ )
+ parser.add_argument(
+ "--config_file",
+ default=None,
+ type=str,
+ required=True,
+ help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d9758a601b49cc9a1d45eb8401f908b7b2d7975
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py
@@ -0,0 +1,191 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from .tokenization_lxmert import LxmertTokenizer
+
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
+ },
+ "tokenizer_file": {
+ "unc-nlp/lxmert-base-uncased": (
+ "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
+ ),
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "unc-nlp/lxmert-base-uncased": 512,
+}
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
+}
+
+
+# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, Bert->Lxmert
+class LxmertTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" Lxmert tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
+ whitespaces by the classic one.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original Lxmert).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
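+
+ Example (an illustrative sketch; downloads the tokenizer files from the Hub):
+
+ ```python
+ >>> from transformers import LxmertTokenizerFast
+
+ >>> tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
+ >>> encoding = tokenizer("Who is sitting on the couch?")
+ >>> # `encoding` holds `input_ids`, `token_type_ids` and `attention_mask`
+ ```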
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ slow_tokenizer_class = LxmertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
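+ # If the normalizer serialized in `tokenizer.json` disagrees with the
+ # arguments passed here, rebuild it so that the arguments take precedence.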
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A Lxmert sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
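+
+ Sketch (hypothetical token IDs): with `cls_token_id=101` and `sep_token_id=102`,
+ `build_inputs_with_special_tokens([7, 8])` returns `[101, 7, 8, 102]`.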
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1b21c7d2f7126363420aad2bf2e71db07570111
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aafc6c0747ae8aa764f134d03e445562b17c3d30
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c57718750e9b98a0fd1698f81be3ca2528af3393
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca393117ff03dfa449859cd6a18c8eede7616d0b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py
new file mode 100644
index 0000000000000000000000000000000000000000..15ccfb4dcb1ff8f0245dc870b0dbc146d07284c8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py
@@ -0,0 +1,904 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
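+# Example invocations (illustrative; the paths are hypothetical):
+#
+#   Megatron-LM -> Transformers:
+#     python checkpoint_reshaping_and_interoperability.py \
+#         --convert_checkpoint_from_megatron_to_transformers \
+#         --load_path /path/to/megatron/checkpoint \
+#         --save_path /path/to/transformers/checkpoint \
+#         --tokenizer_name openai-community/gpt2 \
+#         --max_shard_size 10GB \
+#         --print-checkpoint-structure
+#
+#   Transformers -> Megatron-LM (e.g. 2-way tensor parallel):
+#     python checkpoint_reshaping_and_interoperability.py \
+#         --load_path /path/to/transformers/checkpoint \
+#         --save_path /path/to/megatron/checkpoint \
+#         --target_tensor_model_parallel_size 2 \
+#         --megatron-path /path/to/Megatron-LM
+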
+import argparse
+import json
+import os
+import re
+import sys
+import types
+
+import torch
+
+from transformers import AutoTokenizer, GPT2Config
+from transformers.modeling_utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME, shard_checkpoint
+
+
+def add_checkpointing_args(parser):
+ parser.add_argument("--megatron-path", type=str, default=None, help="Base directory of Megatron repository")
+ parser.add_argument(
+ "--convert_checkpoint_from_megatron_to_transformers",
+ action="store_true",
+ help=(
+ "If True, convert a Megatron checkpoint to a Transformers checkpoint. "
+ "If False, convert a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--load_path",
+ type=str,
+ required=True,
+ help="Path to the checkpoint to convert.",
+ )
+ parser.add_argument(
+ "--save_path",
+ type=str,
+ required=True,
+ help="Path to the converted checkpoint.",
+ )
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
+ return parser
+
+
+def add_megatron_checkpoint_args(parser):
+ parser.add_argument(
+ "--target_tensor_model_parallel_size",
+ type=int,
+ default=1,
+ help=(
+ "The tensor model parallel size of the converted checkpoint. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--target_pipeline_model_parallel_size",
+ type=int,
+ default=1,
+ help=(
+ "The pipeline model parallel size of the converted checkpoint. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--target_data_parallel_size",
+ type=int,
+ default=1,
+ help=(
+ "The data parallel size of the converted checkpoint. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--target_params_dtype",
+ type=str,
+ default="fp32",
+ help=(
+ "The dtype of the converted checkpoint. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--make_vocab_size_divisible_by",
+ type=int,
+ default=128,
+ help=(
+ "Pad the vocab size to be divisible by this value. "
+ "This is added for computational efficieny reasons. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--use_distributed_optimizer",
+ action="store_true",
+ help=(
+ "If True, use the distributed optimizer. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ return parser
+
+
+def add_transformers_checkpoint_args(parser):
+ parser.add_argument(
+ "--tokenizer_name",
+ type=str,
+ default=None,
+ help=(
+ "The name of the pre-trained tokenizer to save. "
+ "If not None, the tokenizer will be saved. "
+ "Only used when converting a Megatron checkpoint to a Transformers checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="10GB",
+ help=(
+ "The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size "
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). "
+ "Only used when converting a Megatron checkpoint to a Transformers checkpoint."
+ ),
+ )
+
+ return parser
+
+
+# The simple map of names for "automated" rules.
+megatron_to_transformers = {
+ "attention.dense": ".attn.c_proj.",
+ "self_attention.dense": ".attn.c_proj.",
+ "mlp.dense_h_to_4h": ".mlp.c_fc.",
+ "mlp.dense_4h_to_h": ".mlp.c_proj.",
+}
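+# The inverse map; `v[1:-1]` strips the leading and trailing dots so the keys
+# read "attn.c_proj", "mlp.c_fc", etc. on the transformers side.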
+transformers_to_megatron = {v[1:-1]: k for k, v in megatron_to_transformers.items()}
+
+tensor_parallel_params = [
+ # megatron-lm layers to merge across tp ranks
+ "self_attention.query_key_value.weight",
+ "self_attention.query_key_value.bias",
+ "self_attention.dense.weight",
+ "mlp.dense_h_to_4h.weight",
+ "mlp.dense_h_to_4h.bias",
+ "mlp.dense_4h_to_h.weight",
+ # deprecated
+ "attention.query_key_value.weight",
+ "attention.query_key_value.bias",
+ "attention.dense.weight",
+ # transformers layers to split across tp ranks
+ "attn.c_attn.weight",
+ "attn.c_attn.bias",
+ "attn.c_proj.weight",
+ "mlp.c_fc.weight",
+ "mlp.c_fc.bias",
+ "mlp.c_proj.weight",
+]
+
+
+def recursive_print(name, val, spaces=0):
+ """
+ Recursively print the structure of a checkpoint. This function is taken from `convert_megatron_gpt2_checkpoint.py`.
+
+ Args:
+ name (str): the name of the current tensor parameter
+ val (dict, torch.Tensor, or scalar): the value of the current parameter; tensors are printed as their shape
+ spaces (int): the number of spaces to print before the output for a nested structure
+ """
+ # Format the message.
+ if name is None:
+ msg = None
+ else:
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
+ msg = fmt.format(name)
+
+ # Print and recurse (if needed).
+ if isinstance(val, dict):
+ if msg is not None:
+ print(msg)
+ for k in val.keys():
+ recursive_print(k, val[k], spaces + 2)
+ elif isinstance(val, torch.Tensor):
+ print(msg, ":", val.size())
+ else:
+ print(msg, ":", val)
+
+
+def megatron_to_transformers_fix_query_key_value_ordering(
+ param, checkpoint_version, num_splits, num_heads, hidden_size
+):
+ """
+ Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] for compatibility with later versions
+ of NVIDIA Megatron-LM. The inverse operation is performed inside Megatron-LM to read checkpoints:
+ https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 If param is the weight tensor of the
+ self-attention block, the returned tensor will have to be transposed one more time to be read by HuggingFace GPT2.
+ This function is taken from `convert_megatron_gpt2_checkpoint.py`.
+
+ Args:
+ param (torch.Tensor): the tensor to permute
+ checkpoint_version (int): the version of the checkpoint.
+ num_splits (int): the number of projections, usually 3 for (Query, Key, Value)
+ num_heads (int): the number of attention heads
+ hidden_size (int): the hidden size per head
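+
+ Shape sketch (illustrative values): with `num_heads=16`, `hidden_size=64` (per head) and `num_splits=3`, a
+ checkpoint-version-2.0 weight of shape `[16 * 3 * 64, D]` is viewed as `(16, 3, 64, D)`, the first two axes
+ are swapped, and the result is flattened back to `[3 * 16 * 64, D]`.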
+ """
+
+ input_shape = param.size()
+ if checkpoint_version == 1.0:
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
+ saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
+ param = param.view(*saved_shape)
+ param = param.transpose(0, 2)
+ param = param.transpose(1, 2).contiguous()
+ elif checkpoint_version >= 2.0:
+ # other versions store [num_heads * num_splits * hidden_size, :]
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
+ param = param.view(*saved_shape)
+ param = param.transpose(0, 1).contiguous()
+ param = param.view(*input_shape)
+ return param
+
+
+def transformers_to_megatron_fix_query_key_value_ordering(
+ param, checkpoint_version, num_splits, num_heads, hidden_size
+):
+ """
+ Permutes layout of param tensor to the one compatible with respective NVIDIA Megatron-LM checkpoint versions. Input
+ is [num_splits * num_heads * hidden_size, :] and output is [num_heads * hidden_size * num_splits, :] for version
+ 1.0 and [num_heads * num_splits * hidden_size, :] for version 2.0 and later. If param is the weight tensor of the
+ self-attention block, the param needs to be already transposed before calling this function.
+
+ Args:
+ param (torch.Tensor): the tensor to permute
+ checkpoint_version (int): the version of the checkpoint.
+ num_splits (int): the number of projections, usually 3 for (Query, Key, Value)
+ num_heads (int): the number of attention heads
+ hidden_size (int): the hidden size per head
+ """
+
+ # Input is [num_splits * num_heads * hidden_size, :]
+ input_shape = param.size()
+ if checkpoint_version == 1.0:
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
+ current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:]
+ param = param.view(*current_shape)
+ param = param.transpose(0, 2)
+ param = param.transpose(1, 2).contiguous()
+ elif checkpoint_version >= 2.0:
+ # other versions store [num_heads * num_splits * hidden_size, :]
+ current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:]
+ param = param.view(*current_shape)
+ param = param.transpose(0, 1).contiguous()
+ param = param.view(*input_shape)
+ return param
+
+
+def merge_transformers_sharded_states(path, num_checkpoints):
+ """
+ Merge sharded checkpoints from transformers into a single checkpoint.
+
+ Args:
+ path (str): the path to the sharded checkpoints
+ num_checkpoints (int): the number of checkpoints to merge
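+
+ The shards are assumed to follow the standard naming scheme, e.g. `pytorch_model-00001-of-00003.bin` for the
+ first of three shards.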
+ """
+ state_dict = {}
+ for i in range(1, num_checkpoints + 1):
+ checkpoint_path = os.path.join(path, f"pytorch_model-{i:05d}-of-{num_checkpoints:05d}.bin")
+ current_chunk = torch.load(checkpoint_path, map_location="cpu")
+ state_dict.update(current_chunk)
+ return state_dict
+
+
+def get_megatron_sharded_states(args, tp_size, pp_size, pp_rank):
+ """
+ Get sharded checkpoints from NVIDIA Megatron-LM checkpoint based on the provided tensor parallel size, pipeline
+ parallel size and pipeline parallel rank.
+
+ Args:
+ args (argparse.Namespace): the arguments to the script
+ tp_size (int): the tensor parallel size
+ pp_size (int): the pipeline parallel size
+ pp_rank (int): the pipeline parallel rank
+ """
+ tp_state_dicts = []
+ for i in range(tp_size):
+ sub_dir_name = f"mp_rank_{i:02d}" if pp_size == 1 else f"mp_rank_{i:02d}_{pp_rank:03d}"
+ for checkpoint_name in ["model_optim_rng.pt", "model_rng.pt"]:
+ checkpoint_path = os.path.join(args.load_path, sub_dir_name, checkpoint_name)
+ if os.path.isfile(checkpoint_path):
+ break
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
+ tp_state_dicts.append(state_dict)
+ return tp_state_dicts
+
+
+def get_element_from_dict_by_path(d, path):
+ """
+ Get element from dictionary by path. If element is not present, recursively add empty dictionaries.
+
+ Args:
+ d (dict): the dictionary to get the element from
+ path (list): the path to the element which is delimited by "."
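+
+ Example (sketch): `get_element_from_dict_by_path({}, "a.b")` creates and returns the nested (empty)
+ dictionary at `d["a"]["b"]`, adding both levels to `d` along the way.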
+ """
+ path = path.split(".")
+ for k in path:
+ if k not in d:
+ d[k] = {}
+ d = d[k]
+ return d
+
+
+def convert_checkpoint_from_megatron_to_transformers(args):
+ """
+ Convert NVIDIA Megatron-LM checkpoint to HuggingFace Transformers checkpoint. This handles Megatron checkpoints
+ with different tensor parallelism and pipeline parallelism sizes. It saves the converted checkpoint into shards
+ using HuggingFace Transformers checkpoint sharding functionality. This greatly extends the functionality of
+ `convert_megatron_gpt2_checkpoint.py`.
+
+ Args:
+ args (argparse.Namespace): the arguments to the script
+ """
+ # Load Megatron-LM checkpoint arguments from the state dict
+ sub_dirs = os.listdir(args.load_path)
+ possible_sub_dirs = ["mp_rank_00", "mp_rank_00_000"]
+ for sub_dir in possible_sub_dirs:
+ if sub_dir in sub_dirs:
+ rank0_checkpoint_name = os.listdir(os.path.join(args.load_path, sub_dir))[0]
+ rank0_checkpoint_path = os.path.join(args.load_path, sub_dir, rank0_checkpoint_name)
+ break
+ print(f"Loading Megatron-LM checkpoint arguments from: {rank0_checkpoint_path}")
+ state_dict = torch.load(rank0_checkpoint_path, map_location="cpu")
+ megatron_args = state_dict.get("args", None)
+ if megatron_args is None:
+ raise ValueError(
+ "Megatron-LM checkpoint does not contain arguments. This utility only supports Megatron-LM checkpoints"
+ " containing all the megatron arguments. This is because it loads all config related to model"
+ " architecture, the tensor and pipeline model parallel size from the checkpoint insead of user having to"
+ " manually specify all the details. Please save Megatron-LM checkpoint along with all the megatron"
+ " arguments to use this utility."
+ )
+
+ # Create Transformers GPT2 config from Megatron-LM arguments
+ if megatron_args is not None:
+ if megatron_args.bias_gelu_fusion:
+ activation_function = "gelu_fast"
+ elif megatron_args.openai_gelu:
+ activation_function = "gelu_new"
+ else:
+ activation_function = "gelu"
+ else:
+ # in the very early days this used to be "gelu_new"
+ activation_function = "gelu_new"
+ vocab_size = (
+ megatron_args.padded_vocab_size
+ if getattr(megatron_args, "orig_vocab_size", None) is None
+ else megatron_args.orig_vocab_size
+ )
+ print(f"vocab_size: {vocab_size}")
+
+ config = GPT2Config(
+ vocab_size=vocab_size,
+ n_positions=megatron_args.max_position_embeddings,
+ n_embd=megatron_args.hidden_size,
+ n_layer=megatron_args.num_layers,
+ n_head=megatron_args.num_attention_heads,
+ n_inner=megatron_args.ffn_hidden_size,
+ activation_function=activation_function,
+ resid_pdrop=0.1,
+ embd_pdrop=0.1,
+ attn_pdrop=0.1,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ summary_type="cls_index",
+ summary_use_proj=True,
+ summary_activation=None,
+ summary_proj_to_labels=True,
+ summary_first_dropout=0.1,
+ scale_attn_weights=True,
+ use_cache=True,
+ bos_token_id=vocab_size - 1,
+ eos_token_id=vocab_size - 1,
+ architectures=["GPT2LMHeadModel"],
+ )
+
+ output_state_dict = {}
+
+ checkpoint_version = state_dict.get("checkpoint_version", 0.0)
+ tp_size = megatron_args.tensor_model_parallel_size
+ pp_size = megatron_args.pipeline_model_parallel_size
+ dtype = torch.float32
+ # The regex to extract layer names.
+ layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
+
+ # Convert.
+ print("Converting")
+
+ # Embeddings
+ print("Converting embeddings")
+ tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, 0)
+
+ # Convert and store the position embeddings.
+ position_embeddings = get_element_from_dict_by_path(
+ tp_state_dicts[0], "model.language_model.embedding.position_embeddings.weight"
+ )
+ output_state_dict["transformer.wpe.weight"] = position_embeddings.to(dtype)
+
+ # Convert and store the word embeddings.
+ word_embeddings = torch.cat(
+ [
+ get_element_from_dict_by_path(
+ tp_state_dicts[tp_rank], "model.language_model.embedding.word_embeddings.weight"
+ )
+ for tp_rank in range(tp_size)
+ ],
+ dim=0,
+ )
+ word_embeddings = word_embeddings[:vocab_size].to(dtype)
+ output_state_dict["transformer.wte.weight"] = word_embeddings
+
+ # Transformer Layers
+ print("Converting transformer layers")
+ # The number of heads.
+ heads = config.n_head
+ # The hidden_size per head.
+ hidden_size_per_head = config.n_embd // config.n_head
+ n_positions = config.n_positions
+ num_layers = config.num_hidden_layers // pp_size
+
+ for pp_rank in range(pp_size):
+ if pp_size > 0:
+ print(f"Converting pipeline parallel rank {pp_rank}")
+ tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, pp_rank)
+
+ # The transformer.
+ path = (
+ "model.language_model.transformer"
+ if "transformer" in get_element_from_dict_by_path(tp_state_dicts[0], "model.language_model").keys()
+ else "model.language_model.encoder"
+ )
+ # Extract the layers.
+ for key, val in get_element_from_dict_by_path(tp_state_dicts[0], path).items():
+ # Match the name.
+ m = layer_re.match(key)
+ # Stop if that's not a layer
+ if m is None:
+ break
+
+ # The index of the layer.
+ layer_idx = int(m.group(1)) + pp_rank * num_layers
+ # The name of the operation.
+ op_name = m.group(2)
+ # Is it a weight or a bias?
+ weight_or_bias = m.group(3)
+
+ # The name of the layer.
+ layer_name = f"transformer.h.{layer_idx}"
+
+ if op_name + "." + weight_or_bias not in tensor_parallel_params:
+ params = val.to(dtype)
+ else:
+ dim = 1 if op_name in ["self_attention.dense", "mlp.dense_4h_to_h", "attention.dense"] else 0
+ params = torch.cat(
+ [val]
+ + [
+ get_element_from_dict_by_path(tp_state_dicts[tp_rank], f"{path}")[key]
+ for tp_rank in range(1, tp_size)
+ ],
+ dim=dim,
+ ).to(dtype)
+
+ # For layernorm(s), simply store the layer norm.
+ if op_name.endswith("layernorm"):
+ ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
+ output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = params
+
+ # Transpose the QKV matrix.
+ elif (
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
+ ) and weight_or_bias == "weight":
+ # Insert a tensor of 1x1xDxD bias.
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=dtype)).view(
+ 1, 1, n_positions, n_positions
+ )
+ output_state_dict[layer_name + ".attn.bias"] = causal_mask
+
+ # Insert a "dummy" tensor for masked_bias.
+ masked_bias = torch.tensor(-1e4, dtype=dtype)
+ output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
+
+ out_val = megatron_to_transformers_fix_query_key_value_ordering(
+ params,
+ checkpoint_version,
+ 3,
+ heads,
+ hidden_size_per_head,
+ )
+ # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
+ out_val = out_val.transpose(0, 1).contiguous()
+ # Store.
+ output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
+
+ # Transpose the bias.
+ elif (
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
+ ) and weight_or_bias == "bias":
+ out_val = megatron_to_transformers_fix_query_key_value_ordering(
+ params, checkpoint_version, 3, heads, hidden_size_per_head
+ )
+ # Store. No change of shape.
+ output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
+
+ # Transpose the weights.
+ elif weight_or_bias == "weight":
+ out_name = megatron_to_transformers[op_name]
+ output_state_dict[layer_name + out_name + "weight"] = params.transpose(0, 1)
+
+ # Copy the bias.
+ elif weight_or_bias == "bias":
+ out_name = megatron_to_transformers[op_name]
+ output_state_dict[layer_name + out_name + "bias"] = params
+
+ if config.n_layer != (layer_idx + 1):
+ raise ValueError(f"Expected {config.n_layer} layers but found {layer_idx + 1}")
+
+ # The final layernorm.
+ print("Converting final layernorm")
+ params = get_element_from_dict_by_path(tp_state_dicts[0], str(path))
+ output_state_dict["transformer.ln_f.weight"] = params["final_layernorm.weight"].to(dtype)
+ output_state_dict["transformer.ln_f.bias"] = params["final_layernorm.bias"].to(dtype)
+
+ # For the LM head, transformers wants the weight matrix tied to the word embeddings.
+ print("Converting LM head")
+ output_state_dict["lm_head.weight"] = word_embeddings.to(dtype)
+
+ # It should be done!
+ print("Conversion from Megatron-LM to Transformers is done!")
+
+ # Print the structure of converted state dict.
+ if args.print_checkpoint_structure:
+ recursive_print(None, output_state_dict)
+
+ # Add tokenizer class info to config
+ # see https://github.com/huggingface/transformers/issues/13906)
+
+ if args.tokenizer_name is None:
+ tokenizer_name = "openai-community/gpt2"
+ else:
+ tokenizer_name = args.tokenizer_name
+
+ tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
+ tokenizer_class = type(tokenizer).__name__
+ config.tokenizer_class = tokenizer_class
+
+ # Store the config to file.
+ print("Saving config")
+ config.save_pretrained(args.save_path)
+
+ # Save tokenizer based on args
+ if args.tokenizer_name is not None:
+ print(f"Adding {tokenizer_class} tokenizer files")
+ tokenizer.save_pretrained(args.save_path)
+
+ # Store the state_dict to file.
+ max_shard_size = int(args.max_shard_size) if args.max_shard_size.isdigit() else args.max_shard_size
+ shards, index = shard_checkpoint(output_state_dict, max_shard_size=max_shard_size)
+
+ # Save the model
+ for shard_file, shard in shards.items():
+ torch.save(shard, os.path.join(args.save_path, shard_file))
+
+ if index is None:
+ print(f"Model weights saved in {os.path.join(args.save_path, WEIGHTS_NAME)}")
+ else:
+ save_index_file = os.path.join(args.save_path, WEIGHTS_INDEX_NAME)
+ # Save the index as well
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+ print(
+ f"The model is bigger than the maximum size per checkpoint ({args.max_shard_size}) and is going to be "
+ f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
+ f"index located at {save_index_file}."
+ )
+
+
+def convert_checkpoint_from_transformers_to_megatron(args):
+ """
+ Convert a checkpoint from HuggingFace Transformers to Megatron-LM. This allows the converted checkpoint to have
+ arbitrary tensor parallelism and pipeline parallelism sizes. It takes as input a checkpoint from HuggingFace
+ Transformers which can have multiple shards.
+
+ Args:
+ args (argparse.Namespace): the arguments to the script
+
+ """
+ os.makedirs(args.save_path, exist_ok=True)
+ # Search in the directory above this one
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
+ if args.megatron_path is not None:
+ sys.path.insert(0, args.megatron_path)
+
+ try:
+ from megatron.tokenizer.tokenizer import _vocab_size_with_padding
+ except ModuleNotFoundError:
+ print("Unable to import Megatron, please specify the path to Megatron using --megatron-path. Exiting.")
+ exit(1)
+
+ # load the transformers model state dict and config
+ sub_dirs = [x for x in os.listdir(args.load_path) if x.startswith("pytorch_model")]
+ if len(sub_dirs) == 1:
+ checkpoint_name = "pytorch_model.bin"
+ state_dict = torch.load(os.path.join(args.load_path, checkpoint_name), map_location="cpu")
+ else:
+ num_checkpoints = len(sub_dirs) - 1
+ state_dict = merge_transformers_sharded_states(args.load_path, num_checkpoints)
+
+ config = GPT2Config.from_pretrained(args.load_path)
+
+ # Saving the tracker file
+ tracker_filepath = os.path.join(args.save_path, "latest_checkpointed_iteration.txt")
+ with open(tracker_filepath, "w") as f:
+ f.write("release")
+
+ # create `release` dir in args.save_path
+ release_dir = os.path.join(args.save_path, "release")
+ os.makedirs(release_dir, exist_ok=True)
+
+ # megatron args
+ megatron_args = {
+ "orig_vocab_size": config.vocab_size,
+ "max_position_embeddings": config.n_positions,
+ "hidden_size": config.n_embd,
+ "num_layers": config.n_layer,
+ "num_attention_heads": config.n_head,
+ "ffn_hidden_size": config.n_inner,
+ "tensor_model_parallel_size": args.target_tensor_model_parallel_size,
+ "pipeline_model_parallel_size": args.target_pipeline_model_parallel_size,
+ "data_parallel_size": args.target_data_parallel_size,
+ "make_vocab_size_divisible_by": args.make_vocab_size_divisible_by,
+ "rank": 0,
+ "tokenizer_type": "GPT2BPETokenizer",
+ }
+
+ if config.activation_function == "gelu":
+ megatron_args["bias_gelu_fusion"] = False
+ megatron_args["openai_gelu"] = False
+ elif config.activation_function == "gelu_fast":
+ megatron_args["bias_gelu_fusion"] = True
+ megatron_args["openai_gelu"] = False
+ elif config.activation_function == "gelu_new":
+ megatron_args["bias_gelu_fusion"] = False
+ megatron_args["openai_gelu"] = True
+
+ margs = types.SimpleNamespace()
+ for k, v in megatron_args.items():
+ setattr(margs, k, v)
+
+ # params dtype
+ if args.target_params_dtype == "fp16":
+ dtype = torch.float16
+ elif args.target_params_dtype == "bf16":
+ dtype = torch.bfloat16
+ else:
+ dtype = torch.float32
+ setattr(margs, "params_dtype", dtype)
+
+ # save dummy optim state dict
+ dummy_optim_state_dict = {}
+ dummy_optim_state_dict["optimizer"] = {
+ "step": 0,
+ "param_groups": [
+ {
+ "lr": 0.0,
+ "beta1": 0.0,
+ "beta2": 0.0,
+ "eps": 0.0,
+ "weight_decay": 0.0,
+ "correct_bias": False,
+ "params": [],
+ }
+ ],
+ }
+ if args.use_distributed_optimizer:
+ for i in range(args.target_pipeline_model_parallel_size):
+ for j in range(args.target_tensor_model_parallel_size):
+ for k in range(args.target_data_parallel_size):
+ if args.target_pipeline_model_parallel_size == 1:
+ checkpoint_dir = f"mp_rank_{j:02d}_{k:03d}"
+ else:
+ checkpoint_dir = f"mp_rank_{j:02d}_{i:03d}_{k:03d}"
+ checkpoint_dir = os.path.join(release_dir, checkpoint_dir)
+ os.makedirs(checkpoint_dir, exist_ok=True)
+ torch.save(
+ dummy_optim_state_dict,
+ os.path.join(checkpoint_dir, "optim.pt"),
+ )
+
+ # Convert.
+ print("Converting")
+ output_state_dict = []
+ for i in range(args.target_tensor_model_parallel_size):
+ output_state_dict.append({})
+
+ # Embedding layer
+ print("converting embedding layer")
+ pos_embedding = state_dict["transformer.wpe.weight"].to(dtype)
+ word_embedding = state_dict["transformer.wte.weight"].to(dtype)
+ orig_vocab_size = config.vocab_size
+ padded_vocab_size = _vocab_size_with_padding(orig_vocab_size, margs)
+ setattr(margs, "padded_vocab_size", padded_vocab_size)
+ # Cut out extra padding we don't need
+ if orig_vocab_size > padded_vocab_size:
+ full_word_embed = word_embedding[0:padded_vocab_size, :]
+ # Expanding embedding to larger size by replicating final entry
+ elif orig_vocab_size < padded_vocab_size:
+ padding_size = padded_vocab_size - orig_vocab_size
+ full_word_embed = torch.cat((word_embedding, word_embedding[-1].unsqueeze(0).expand(padding_size, -1)))
+ # Same size!
+ else:
+ full_word_embed = word_embedding
+
+ # Split into new tensor model parallel sizes
+ out_word_embed = torch.chunk(full_word_embed, args.target_tensor_model_parallel_size, dim=0)
+ for i in range(args.target_tensor_model_parallel_size):
+ pos_emb_dict = get_element_from_dict_by_path(
+ output_state_dict[i], "model.language_model.embedding.position_embeddings"
+ )
+ pos_emb_dict["weight"] = pos_embedding
+
+ word_emb_dict = get_element_from_dict_by_path(
+ output_state_dict[i], "model.language_model.embedding.word_embeddings"
+ )
+ word_emb_dict["weight"] = out_word_embed[i].clone()
+
+ # Transformer layers
+ print("converting transformer layers")
+ if config.num_attention_heads % args.target_tensor_model_parallel_size != 0:
+ raise ValueError(
+ f"Number of attention heads ({config.num_attention_heads}) must be divisible by number of tensor parallelism"
+ f" ({args.target_tensor_model_parallel_size})"
+ )
+
+ if config.num_hidden_layers % args.target_pipeline_model_parallel_size != 0:
+ raise ValueError(
+ f"Number of layers ({config.num_hidden_layers}) must be divisible by number of pipeline parallelism"
+ f" ({args.target_pipeline_model_parallel_size})"
+ )
+
+ num_layers = config.num_hidden_layers // args.target_pipeline_model_parallel_size
+
+ layer_re = re.compile(r"transformer.h\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
+ # The number of heads.
+ heads = config.n_head
+ # The hidden_size per head.
+ hidden_size_per_head = config.n_embd // config.n_head
+ for pp_rank in range(args.target_pipeline_model_parallel_size):
+ layer_offset = pp_rank * num_layers
+ if pp_rank > 0:
+ output_state_dict = []
+ for i in range(args.target_tensor_model_parallel_size):
+ output_state_dict.append({})
+
+ for layer in range(num_layers):
+ pp_layer_id = layer + layer_offset
+ layers_to_copy = [
+ layer_name
+ for layer_name in state_dict.keys()
+ if layer_name.startswith(f"transformer.h.{pp_layer_id}.")
+ ]
+
+ for layer_name in layers_to_copy:
+ m = layer_re.match(layer_name)
+ # Stop if that's not a layer
+ if m is None:
+ break
+
+ # The index of the layer.
+ _ = int(m.group(1))
+ # The name of the operation.
+ op_name = m.group(2)
+ # Is it a weight or a bias?
+ weight_or_bias = m.group(3)
+
+ params = state_dict[layer_name].to(dtype)
+ # handle layernorm
+ if op_name.startswith("ln"):
+ out_name = "input_layernorm" if op_name.endswith("1") else "post_attention_layernorm"
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
+
+ # handle attention K, V, Q weights
+ elif op_name.startswith("attn.c_attn") and weight_or_bias == "weight":
+ # transformers stores D X (3*D) but Megatron-LM expects (3*D) X D.
+ params = params.transpose(0, 1).contiguous()
+
+ params = transformers_to_megatron_fix_query_key_value_ordering(
+ params,
+ 3.0,
+ 3,
+ heads,
+ hidden_size_per_head,
+ )
+ layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}"
+
+ # handle attention K, V, Q bias
+ elif op_name.startswith("attn.c_attn") and weight_or_bias == "bias":
+ params = transformers_to_megatron_fix_query_key_value_ordering(
+ params,
+ 3.0,
+ 3,
+ heads,
+ hidden_size_per_head,
+ )
+ layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}"
+
+ # handle attention and mlp weights
+ elif weight_or_bias == "weight":
+ out_name = transformers_to_megatron.get(op_name, None)
+ if out_name is None:
+ continue
+ params = params.transpose(0, 1)
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
+
+ # handle attention and mlp bias
+ elif weight_or_bias == "bias":
+ out_name = transformers_to_megatron.get(op_name, None)
+ if out_name is None:
+ continue
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
+
+ # skip
+ else:
+ continue
+
+ if op_name + "." + weight_or_bias in tensor_parallel_params:
+ dim = 1 if op_name in ["attn.c_proj", "mlp.c_proj"] else 0
+ params = torch.chunk(params, args.target_tensor_model_parallel_size, dim=dim)
+
+ for i in range(args.target_tensor_model_parallel_size):
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder")
+ params_dict[layer_name] = (
+ params[i].clone() if (op_name + "." + weight_or_bias in tensor_parallel_params) else params
+ )
+
+ if pp_rank == args.target_pipeline_model_parallel_size - 1:
+ # handle final layernorm
+ for weight_or_bias in ["weight", "bias"]:
+ params = state_dict[f"transformer.ln_f.{weight_or_bias}"].to(dtype)
+ layer_name = f"final_layernorm.{weight_or_bias}"
+ for i in range(args.target_tensor_model_parallel_size):
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder")
+ params_dict[layer_name] = params
+
+ # add the LM head
+ for i in range(args.target_tensor_model_parallel_size):
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.word_embeddings_for_head")
+ params_dict["weight"] = out_word_embed[i].clone()
+
+ # saving the state dict as per the tp_rank and pp_rank
+ for tp_rank in range(args.target_tensor_model_parallel_size):
+ output_state_dict[tp_rank]["checkpoint_version"] = 3.0
+ output_state_dict[tp_rank]["args"] = margs
+ checkpoint_dir = (
+ f"mp_rank_{tp_rank:02d}"
+ if args.target_pipeline_model_parallel_size == 1
+ else f"mp_rank_{tp_rank:02d}_{pp_rank:03d}"
+ )
+ if args.use_distributed_optimizer:
+ checkpoint_name = "model_rng.pt"
+ else:
+ checkpoint_name = "model_optim_rng.pt"
+ output_state_dict[tp_rank]["optimizer"] = dummy_optim_state_dict["optimizer"]
+ checkpoint_dir = os.path.join(release_dir, checkpoint_dir)
+ os.makedirs(checkpoint_dir, exist_ok=True)
+ checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name)
+ if args.print_checkpoint_structure:
+ print(
+ f"Checkpoint structure of model state dict shard belonging to TP rank {tp_rank} and PP rank"
+ f" {pp_rank}:"
+ )
+ recursive_print(None, output_state_dict[tp_rank])
+ torch.save(output_state_dict[tp_rank], checkpoint_path)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser = add_checkpointing_args(parser)
+ parser = add_megatron_checkpoint_args(parser)
+ parser = add_transformers_checkpoint_args(parser)
+ args = parser.parse_args()
+ if args.convert_checkpoint_from_megatron_to_transformers:
+ convert_checkpoint_from_megatron_to_transformers(args)
+ else:
+ convert_checkpoint_from_transformers_to_megatron(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..38060f8af5c7b0399f710eda2389cffd3669ea0d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py
@@ -0,0 +1,358 @@
+####################################################################################################
+
+# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+####################################################################################################
+
+#
+# Note: If, when running this conversion script, you get an exception:
+# ModuleNotFoundError: No module named 'megatron.model.enums'
+# you need to tell python where to find the clone of Megatron-LM, e.g.:
+#
+# cd /tmp
+# git clone https://github.com/NVIDIA/Megatron-LM
+# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
+#
+# if you already have it cloned elsewhere, simply adjust the path to the existing path
+#
+# If the training was done using a Megatron-LM fork, e.g.,
+# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
+# in your path, i.e., /path/to/Megatron-DeepSpeed/
+#
+
+import argparse
+import os
+import re
+import zipfile
+
+import torch
+
+from transformers import AutoTokenizer, GPT2Config
+
+
+####################################################################################################
+
+
+def recursive_print(name, val, spaces=0):
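+ # Pretty-print the nested structure of a (possibly nested) state dict,
+ # indenting two spaces per nesting level; tensors are printed as their shape.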
+ # Format the message.
+ if name is None:
+ msg = None
+ else:
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
+ msg = fmt.format(name)
+
+ # Print and recurse (if needed).
+ if isinstance(val, dict):
+ if msg is not None:
+ print(msg)
+ for k in val.keys():
+ recursive_print(k, val[k], spaces + 2)
+ elif isinstance(val, torch.Tensor):
+ print(msg, ":", val.size())
+ else:
+ print(msg, ":", val)
+
+
+def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
+ # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
+ # for compatibility with later versions of NVIDIA Megatron-LM.
+ # The inverse operation is performed inside Megatron-LM to read checkpoints:
+ # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
+ # If param is the weight tensor of the self-attention block, the returned tensor
+ # will have to be transposed one more time to be read by HuggingFace GPT2.
+ input_shape = param.size()
+ if checkpoint_version == 1.0:
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
+ saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
+ param = param.view(*saved_shape)
+ param = param.transpose(0, 2)
+ param = param.transpose(1, 2).contiguous()
+ elif checkpoint_version >= 2.0:
+ # other versions store [num_heads * num_splits * hidden_size, :]
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
+ param = param.view(*saved_shape)
+ param = param.transpose(0, 1).contiguous()
+ param = param.view(*input_shape)
+ return param
+
+
+####################################################################################################
+
+
+def convert_megatron_checkpoint(args, input_state_dict, config):
+ # The converted output model.
+ output_state_dict = {}
+
+ # old versions did not store training args
+ ds_args = input_state_dict.get("args", None)
+ if ds_args is not None:
+ # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
+ # from pprint import pprint
+ # pprint(vars(ds_args))
+
+ config.vocab_size = ds_args.padded_vocab_size
+ config.n_positions = ds_args.max_position_embeddings
+ config.n_embd = ds_args.hidden_size
+ config.n_layer = ds_args.num_layers
+ config.n_head = ds_args.num_attention_heads
+ config.n_inner = ds_args.ffn_hidden_size
+ # pprint(config)
+
+ # The number of heads.
+ heads = config.n_head
+ # The hidden_size per head.
+ hidden_size_per_head = config.n_embd // config.n_head
+ # Megatron-LM checkpoint version
+ if "checkpoint_version" in input_state_dict.keys():
+ checkpoint_version = input_state_dict["checkpoint_version"]
+ else:
+ checkpoint_version = 0.0
+
+ # The model.
+ model = input_state_dict["model"]
+ # The language model.
+ lm = model["language_model"]
+ # The embeddings.
+ embeddings = lm["embedding"]
+
+ # The word embeddings.
+ word_embeddings = embeddings["word_embeddings"]["weight"]
+ # Truncate the embedding table to vocab_size rows.
+ word_embeddings = word_embeddings[: config.vocab_size, :]
+ output_state_dict["transformer.wte.weight"] = word_embeddings
+
+ # The position embeddings.
+ pos_embeddings = embeddings["position_embeddings"]["weight"]
+ # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
+ n_positions = pos_embeddings.size(0)
+ if n_positions != config.n_positions:
+ raise ValueError(
+ f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
+ )
+ # Store the position embeddings.
+ output_state_dict["transformer.wpe.weight"] = pos_embeddings
+
+ # The transformer.
+ transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
+
+ # The regex to extract layer names.
+ layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
+
+ # The simple map of names for "automated" rules.
+ megatron_to_transformers = {
+ "attention.dense": ".attn.c_proj.",
+ "self_attention.dense": ".attn.c_proj.",
+ "mlp.dense_h_to_4h": ".mlp.c_fc.",
+ "mlp.dense_4h_to_h": ".mlp.c_proj.",
+ }
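+ # e.g. "layers.0.mlp.dense_h_to_4h.weight" ends up stored as "transformer.h.0.mlp.c_fc.weight".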
+
+ # Extract the layers.
+ for key, val in transformer.items():
+ # Match the name.
+ m = layer_re.match(key)
+
+ # Stop if that's not a layer
+ if m is None:
+ break
+
+ # The index of the layer.
+ layer_idx = int(m.group(1))
+ # The name of the operation.
+ op_name = m.group(2)
+ # Is it a weight or a bias?
+ weight_or_bias = m.group(3)
+
+ # The name of the layer.
+ layer_name = f"transformer.h.{layer_idx}"
+
+ # For layernorm(s), simply store the layer norm.
+ if op_name.endswith("layernorm"):
+ ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
+ output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
+
+ # Transpose the QKV matrix.
+ elif (
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
+ ) and weight_or_bias == "weight":
+ # Insert the causal mask as a 1 x 1 x n_positions x n_positions lower-triangular "bias" buffer.
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
+ 1, 1, n_positions, n_positions
+ )
+ output_state_dict[layer_name + ".attn.bias"] = causal_mask
+
+ # Insert a "dummy" tensor for masked_bias.
+ masked_bias = torch.tensor(-1e4, dtype=torch.float16)
+ output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
+
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
+ # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
+ out_val = out_val.transpose(0, 1).contiguous()
+ # Store.
+ output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
+
+ # Transpose the bias.
+ elif (
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
+ ) and weight_or_bias == "bias":
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
+ # Store. No change of shape.
+ output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
+
+ # Transpose the weights.
+ elif weight_or_bias == "weight":
+ out_name = megatron_to_transformers[op_name]
+ output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
+
+ # Copy the bias.
+ elif weight_or_bias == "bias":
+ out_name = megatron_to_transformers[op_name]
+ output_state_dict[layer_name + out_name + "bias"] = val
+
+ # DEBUG.
+ assert config.n_layer == layer_idx + 1
+
+ # The final layernorm.
+ output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
+ output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
+
+ # For the LM head, transformers ties the weight matrix to the word embeddings.
+ output_state_dict["lm_head.weight"] = word_embeddings
+
+ # It should be done!
+ return output_state_dict
+
+
+####################################################################################################
+
+
+def main():
+ # Create the argument parser.
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
+ parser.add_argument(
+ "path_to_checkpoint",
+ type=str,
+ help="Path to the checkpoint file (.zip archive or direct .pt file)",
+ )
+ parser.add_argument(
+ "--config_file",
+ default="",
+ type=str,
+ help="An optional config json file describing the pre-trained model.",
+ )
+ args = parser.parse_args()
+
+ # The output directory is the directory containing the checkpoint.
+ basename = os.path.dirname(args.path_to_checkpoint)
+
+ # Load the model.
+ # Support for .zip archives is kept for backward compatibility.
+ print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
+ if args.path_to_checkpoint.endswith(".zip"):
+ with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
+ with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
+ input_state_dict = torch.load(pytorch_dict, map_location="cpu")
+ else:
+ input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
+
+ ds_args = input_state_dict.get("args", None)
+
+ # Read the config, or default to the model released by NVIDIA.
+ if args.config_file == "":
+ if ds_args is not None:
+ if ds_args.bias_gelu_fusion:
+ activation_function = "gelu_fast"
+ elif ds_args.openai_gelu:
+ activation_function = "gelu_new"
+ else:
+ activation_function = "gelu"
+ else:
+ # in the very early days this used to be "gelu_new"
+ activation_function = "gelu_new"
+
+ # Spell out all parameters in case the defaults change.
+ config = GPT2Config(
+ vocab_size=50257,
+ n_positions=1024,
+ n_embd=1024,
+ n_layer=24,
+ n_head=16,
+ n_inner=4096,
+ activation_function=activation_function,
+ resid_pdrop=0.1,
+ embd_pdrop=0.1,
+ attn_pdrop=0.1,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ summary_type="cls_index",
+ summary_use_proj=True,
+ summary_activation=None,
+ summary_proj_to_labels=True,
+ summary_first_dropout=0.1,
+ scale_attn_weights=True,
+ use_cache=True,
+ bos_token_id=50256,
+ eos_token_id=50256,
+ )
+ else:
+ config = GPT2Config.from_json_file(args.config_file)
+
+ config.architectures = ["GPT2LMHeadModel"]
+
+ # Convert.
+ print("Converting")
+ output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
+
+ # Print the structure of converted state dict.
+ if args.print_checkpoint_structure:
+ recursive_print(None, output_state_dict)
+
+ # Add tokenizer class info to config
+ # see https://github.com/huggingface/transformers/issues/13906
+ if ds_args is not None:
+ tokenizer_type = ds_args.tokenizer_type
+ if tokenizer_type == "GPT2BPETokenizer":
+ tokenizer_model_name = "openai-community/gpt2"
+ elif tokenizer_type == "PretrainedFromHF":
+ tokenizer_model_name = ds_args.tokenizer_name_or_path
+ else:
+ raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
+ else:
+ tokenizer_model_name = "openai-community/gpt2"
+
+ tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
+ tokenizer_class = type(tokenizer).__name__
+ config.tokenizer_class = tokenizer_class
+
+ # Store the config to file.
+ print("Saving config")
+ config.save_pretrained(basename)
+
+ # Save tokenizer based on args
+ print(f"Adding {tokenizer_class} tokenizer files")
+ tokenizer.save_pretrained(basename)
+
+ # Store the state_dict to file.
+ output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
+ torch.save(output_state_dict, output_checkpoint_file)
+
+
+####################################################################################################
+
+if __name__ == "__main__":
+ main()
+
+####################################################################################################
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..77c6d83319e1f65a2d18a0b9f863b5262687c41e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/configuration_rag.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/configuration_rag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9985e980a14ad308d3287bc2e3349d2499e470a5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/configuration_rag.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_rag.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_rag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d862a2f5271bd0bf3018ba3383ed59751154310
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_rag.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_tf_rag.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_tf_rag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..83507298a37dc1d282e41ed207b1cdcba9f5f21e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/modeling_tf_rag.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4868f069939fef290d86fa34beb8f9d3e805d020
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/retrieval_rag.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a9b18514b2839702380b639330597ce714fc8d7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/__pycache__/tokenization_rag.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/modeling_tf_rag.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/modeling_tf_rag.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d8ed6504975286516f7dac65d03022158f4249c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/rag/modeling_tf_rag.py
@@ -0,0 +1,1771 @@
+# coding=utf-8
+# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""TFRAG model implementation."""
+
+
+from __future__ import annotations
+
+import copy
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...configuration_utils import PretrainedConfig
+from ...generation import TFLogitsProcessorList
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ keras,
+ shape_list,
+ unpack_inputs,
+)
+from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_rag import RagConfig
+from .retrieval_rag import RagRetriever
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "RagConfig"
+
+
+@dataclass
+class TFRetrievAugLMMarginOutput(ModelOutput):
+ """
+ Base class for retriever augmented marginalized models outputs.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
+ each vocabulary token.
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
+ sequence_length, embed_size_per_head)`.
+
+ Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
+ (see `past_key_values` input) to speed up sequential decoding.
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+ retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
+ Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
+ the `doc_scores`.
+ retrieved_doc_ids (`tf.Tensor` (int32) of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
+ The indexes of the embedded documents retrieved by the retriever.
+ context_input_ids (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
+ context_attention_mask (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+ question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden states at the output of the last layer of the question encoder, i.e. the pooled output
+ of the model.
+ question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
+ question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights of the question encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
+ generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
+ generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights of the generator encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
+ generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights of the generator decoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ past_key_values: List[tf.Tensor] | None = None
+ doc_scores: tf.Tensor | None = None
+ retrieved_doc_embeds: tf.Tensor | None = None
+ retrieved_doc_ids: tf.Tensor | None = None
+ context_input_ids: tf.Tensor | None = None
+ context_attention_mask: tf.Tensor | None = None
+ question_encoder_last_hidden_state: tf.Tensor | None = None
+ question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ question_enc_attentions: Tuple[tf.Tensor, ...] | None = None
+ generator_enc_last_hidden_state: tf.Tensor | None = None
+ generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None
+ generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFRetrievAugLMOutput(ModelOutput):
+ """
+ Args:
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
+ each vocabulary token.
+ past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
+ sequence_length, embed_size_per_head)`.
+
+ Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
+ (see `past_key_values` input) to speed up sequential decoding.
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+ retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
+ Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
+ the `doc_scores`.
+ retrieved_doc_ids (`tf.Tensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
+ The indexes of the embedded documents retrieved by the retriever.
+ context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
+ context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+ question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden states at the output of the last layer of the question encoder, i.e. the pooled output
+ of the model.
+ question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
+ question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights of the question encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
+ generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
+ generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights of the generator encoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
+ generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights of the generator decoder, after the attention softmax, used to compute the weighted
+ average in the self-attention heads.
+ """
+
+ logits: tf.Tensor = None
+ past_key_values: List[tf.Tensor] | None = None
+ doc_scores: tf.Tensor | None = None
+ retrieved_doc_embeds: tf.Tensor | None = None
+ retrieved_doc_ids: tf.Tensor | None = None
+ context_input_ids: tf.Tensor | None = None
+ context_attention_mask: tf.Tensor | None = None
+ question_encoder_last_hidden_state: tf.Tensor | None = None
+ question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ question_enc_attentions: Tuple[tf.Tensor, ...] | None = None
+ generator_enc_last_hidden_state: tf.Tensor | None = None
+ generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None
+ generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None
+ generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+class TFRagPreTrainedModel(TFPreTrainedModel):
+ r"""
+ RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
+ Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.
+
+ RAG is a retrieval-augmented model that encapsulates three components: a question encoder, a dataset retriever and
+ a generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.
+
+ """
+
+ config_class = RagConfig
+ base_model_prefix = "rag"
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
+
+ @classmethod
+ def from_pretrained_question_encoder_generator(
+ cls,
+ question_encoder_pretrained_model_name_or_path: str = None,
+ generator_pretrained_model_name_or_path: str = None,
+ retriever: RagRetriever = None,
+ *model_args,
+ **kwargs,
+ ) -> TFPreTrainedModel:
+ r"""
+ Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
+ model checkpoints.
+
+ Params:
+ question_encoder_pretrained_model_name_or_path (`str`, *optional*):
+ Information necessary to initiate the question encoder. Can be either:
+
+ - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g.,
+ `google-bert/bert-base-uncased`.
+ - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g.,
+ `dbmdz/bert-base-german-cased`.
+ - A path to a *directory* containing model weights saved using
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *pytorch index checkpoint file* (e.g., `./pt_model/`). In this case,
+ `question_encoder_from_pt` should be set to `True`.
+
+ generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+ Information necessary to initiate the generator. Can be either:
+
+ - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g.,
+ `google-t5/t5-small`.
+ - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g.,
+ `facebook/bart-base`.
+ - A path to a *directory* containing model weights saved using
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *pytorch checkpoint file* (e.g., `./pt_model/`). In this case,
+ `generator_from_pt` should be set to `True`.
+
+ model_args (remaining positional arguments, *optional*):
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
+ retriever ([`RagRetriever`], *optional*):
+ The retriever to use.
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+ Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
+ `output_attentions=True`).
+
+ - To update the question_encoder configuration, use the prefix *question_encoder_* for each
+ configuration parameter.
+ - To update the generator configuration, use the prefix *generator_* for each configuration parameter.
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+ Example:
+
+ ```python
+ >>> from transformers import RagRetriever, TFRagModel
+
+ >>> # initialize a RAG from two pretrained models.
+ >>> model = TFRagModel.from_pretrained_question_encoder_generator(
+ ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
+ ... )
+ >>> # alternatively, initialization from pytorch pretrained models can also be done
+ >>> model = TFRagModel.from_pretrained_question_encoder_generator(
+ ... "facebook/dpr-question_encoder-single-nq-base",
+ ... "facebook/bart-base",
+ ... generator_from_pt=True,
+ ... question_encoder_from_pt=True,
+ ... )
+
+ >>> # saving model after fine-tuning
+ >>> model.save_pretrained("./rag")
+
+ >>> # load retriever
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # load fine-tuned model with retriever
+ >>> model = TFRagModel.from_pretrained("./rag", retriever=retriever)
+ ```"""
+
+ kwargs_question_encoder = {
+ argument[len("question_encoder_") :]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("question_encoder_")
+ }
+
+ kwargs_generator = {
+ argument[len("generator_") :]: value
+ for argument, value in kwargs.items()
+ if argument.startswith("generator_")
+ }
+
+ # remove question_encoder, generator kwargs from kwargs
+ for key in kwargs_question_encoder.keys():
+ del kwargs["question_encoder_" + key]
+ for key in kwargs_generator.keys():
+ del kwargs["generator_" + key]
+
+ # Load and initialize the question_encoder and generator
+ # The distinction between question_encoder and generator at the model level is made
+ # by the value of the flag `is_generator` that we need to set correctly.
+ question_encoder = kwargs_question_encoder.pop("model", None)
+ if question_encoder is None:
+ assert question_encoder_pretrained_model_name_or_path is not None, (
+ "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to"
+ " be defined"
+ )
+
+ from ..auto.modeling_tf_auto import TFAutoModel
+
+ if "config" not in kwargs_question_encoder:
+ from ..auto.configuration_auto import AutoConfig
+
+ question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path)
+ kwargs_question_encoder["config"] = question_encoder_config
+
+ question_encoder = TFAutoModel.from_pretrained(
+ question_encoder_pretrained_model_name_or_path,
+ name="question_encoder",
+ load_weight_prefix=cls.load_weight_prefix,
+ *model_args,
+ **kwargs_question_encoder,
+ )
+
+ generator = kwargs_generator.pop("generator", None)
+ if generator is None:
+ assert generator_pretrained_model_name_or_path is not None, (
+ "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has"
+ " to be defined"
+ )
+
+ from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM
+
+ if "config" not in kwargs_generator:
+ from ..auto.configuration_auto import AutoConfig
+
+ generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path)
+ kwargs_generator["config"] = generator_config
+
+ generator = TFAutoModelForSeq2SeqLM.from_pretrained(
+ generator_pretrained_model_name_or_path,
+ name="generator",
+ load_weight_prefix=cls.load_weight_prefix,
+ **kwargs_generator,
+ )
+
+ # instantiate config with corresponding kwargs
+ config = kwargs.get("config", None)
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+
+ return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
+
+
+RAG_START_DOCSTRING = r"""
+
+ RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator.
+ During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract
+ relevant context documents. The documents are then prepended to the input. Such contextualized inputs are passed
+ to the generator.
+
+ The question encoder can be any *autoencoding* model, preferably [`TFDPRQuestionEncoder`], and the generator can be
+ any *seq2seq* model, preferably [`TFBartForConditionalGeneration`].
+
+ The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the
+ outputs of a retriever in multiple steps---see examples for more details. The model is compatible with any
+ *autoencoding* model as the `question_encoder` and any *seq2seq* model with language model head as the `generator`.
+ It has been tested with [`TFDPRQuestionEncoder`] as the `question_encoder` and [`TFBartForConditionalGeneration`]
+ as the `generator`.
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a Tensorflow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
+ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to
+ general usage and behavior.
+
+ The model is in a developing state, as it is currently fully supported in eager mode only, and may not be
+ exportable in SavedModel format.
+
+ Args:
+ config ([`RagConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+ question_encoder ([`TFPreTrainedModel`]):
+ An encoder model compatible with the faiss index encapsulated by the `retriever`.
+ generator ([`TFPreTrainedModel`]):
+ A seq2seq model used as the generator in the RAG architecture.
+ retriever ([`RagRetriever`]):
+ A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
+"""
+
+
+RAG_FORWARD_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
+ which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
+ obtain the indices.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*):
+ Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
+ *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
+ sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
+ generator's encoder.
+
+ Used by the ([`TFRagModel`]) model during decoding.
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Provide for generation tasks. `None` by default, construct as per instructions for the generator model
+ you're using with your RAG instance.
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ past_key_values (`tuple(tuple(tf.Tensor))`):
+ Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
+ `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used
+ in the ([`RagTokenForGeneration`]) model during decoding.
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
+ has to be provided to the forward pass. `doc_scores` can be computed via
+ `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
+ context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
+ forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
+ context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
+ forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ output_retrieved (`bool`, *optional*):
+ Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
+ `context_attention_mask`. See returned tensors for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`TFRetrievAugLMOutput`] instead of a plain tuple.
+ n_docs (`int`, *optional*, defaults to `config.n_docs`):
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
+"""
+
+
+@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
+class TFRagModel(TFRagPreTrainedModel):
+ load_weight_prefix = "tf_rag_model_1"
+
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[TFPreTrainedModel] = None,
+ generator: Optional[TFPreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None,
+ load_weight_prefix: Optional[str] = None,
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an question_encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+ else:
+ assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}"
+ super().__init__(config, **kwargs)
+
+ if question_encoder is None:
+ from ..auto.modeling_tf_auto import TFAutoModel
+
+ question_encoder = TFAutoModel.from_config(config.question_encoder, name="question_encoder")
+
+ if generator is None:
+ from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM
+
+ load_weight_prefix = load_weight_prefix if load_weight_prefix is not None else self.load_weight_prefix
+ generator = TFAutoModelForSeq2SeqLM.from_config(
+ config.generator, name="generator", load_weight_prefix=load_weight_prefix + "/generator"
+ )
+
+ self.retriever = retriever
+ if self.retriever is not None:
+ assert isinstance(
+ retriever, RagRetriever
+ ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`"
+
+ self.question_encoder = question_encoder
+ self.generator = generator
+
+ def set_retriever(self, retriever: RagRetriever):
+ self.retriever = retriever
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None,
+ doc_scores: np.ndarray | tf.Tensor | None = None,
+ context_input_ids: np.ndarray | tf.Tensor | None = None,
+ context_attention_mask: np.ndarray | tf.Tensor | None = None,
+ use_cache: bool | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ output_retrieved: bool | None = None,
+ n_docs: int | None = None,
+ return_dict: bool | None = None,
+ training: bool = False,
+ **kwargs,
+ ) -> TFRetrievAugLMOutput:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, TFRagModel
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True)
+
+ >>> input_dict = tokenizer.prepare_seq2seq_batch(
+ ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
+ ... )
+ >>> input_ids = input_dict["input_ids"]
+ >>> outputs = model(input_ids)
+ ```"""
+ assert (
+ "decoder_cached_states" not in kwargs
+ ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py
+
+ # aliasing to minimize code changes
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # whether retriever has to be used
+ has_to_retrieve = (
+ self.retriever is not None
+ and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
+ and encoder_outputs is None
+ )
+
+ # encoder_outputs are pre-computed during RAG-token generation
+ if encoder_outputs is None:
+ if has_to_retrieve:
+ question_enc_outputs = self.question_encoder(
+ input_ids, attention_mask=attention_mask, return_dict=True, training=training
+ )
+ # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/modeling_tf_dpr.py#L91
+ question_encoder_last_hidden_state = question_enc_outputs[
+ 0
+ ] # hidden states of question encoder => pooler_output
+
+ retriever_outputs = self.retriever(
+ input_ids,
+ question_encoder_last_hidden_state.numpy(),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="tf",
+ )
+ context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (
+ retriever_outputs["context_input_ids"],
+ retriever_outputs["context_attention_mask"],
+ retriever_outputs["retrieved_doc_embeds"],
+ retriever_outputs["doc_ids"],
+ )
+
+ context_input_ids = tf.cast(context_input_ids, tf.int32)
+ context_attention_mask = tf.cast(context_attention_mask, tf.int32)
+ retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
+ retrieved_doc_ids = tf.cast(retrieved_doc_ids, tf.int32)
+
+ # compute doc_scores
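+ # (batch_size, 1, hidden) x (batch_size, hidden, n_docs) -> (batch_size, 1, n_docs), squeezed
+ # to (batch_size, n_docs): one inner-product score per retrieved document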
+ doc_scores = tf.squeeze(
+ tf.matmul(
+ tf.expand_dims(question_encoder_last_hidden_state, axis=1),
+ retrieved_doc_embeds,
+ transpose_b=True,
+ ),
+ axis=1,
+ )
+
+ else:
+ assert context_input_ids is not None, (
+ "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can"
+ " set a retriever using the `set_retriever(...)` function."
+ )
+ assert context_attention_mask is not None, (
+ "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you"
+ " can set a retriever using the `set_retriever(...)` function."
+ )
+ assert doc_scores is not None, (
+ "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a"
+ " retriever using the `set_retriever(...)` function."
+ )
+
+ assert (
+ doc_scores is not None
+ ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."
+
+ assert (doc_scores.shape[1] % n_docs) == 0, (
+ f" The second dimension of `doc_scores` should be a multiple of `n_docs`={n_docs}, but is"
+ f" {doc_scores.shape[1]}."
+ )
+
+ # Decoder inputs come in without the context-documents dimension; repeat them for each of the n_docs documents.
+ if decoder_input_ids is not None:
+ decoder_input_ids = tf.repeat(decoder_input_ids, n_docs, axis=0)
+
+ if decoder_attention_mask is not None:
+ decoder_attention_mask = tf.repeat(decoder_attention_mask, n_docs, axis=0)
+
+ gen_outputs = self.generator(
+ context_input_ids,
+ attention_mask=context_attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ return_dict=True,
+ training=training,
+ )
+
+ if not has_to_retrieve:
+ question_encoder_last_hidden_state = None
+ question_enc_hidden_states = None
+ question_enc_attentions = None
+ retrieved_doc_embeds = None
+ retrieved_doc_ids = None
+ else:
+ question_enc_hidden_states = question_enc_outputs.hidden_states
+ question_enc_attentions = question_enc_outputs.attentions
+
+ if not has_to_retrieve or not output_retrieved:
+ # don't output retrieved docs
+ context_input_ids = None
+ context_attention_mask = None
+ retrieved_doc_embeds = None
+ retrieved_doc_ids = None
+
+ return TFRetrievAugLMOutput(
+ logits=gen_outputs.logits,
+ doc_scores=doc_scores,
+ past_key_values=gen_outputs.past_key_values,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ retrieved_doc_embeds=retrieved_doc_embeds,
+ retrieved_doc_ids=retrieved_doc_ids,
+ question_encoder_last_hidden_state=question_encoder_last_hidden_state,
+ question_enc_hidden_states=question_enc_hidden_states,
+ question_enc_attentions=question_enc_attentions,
+ generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
+ generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
+ generator_enc_attentions=gen_outputs.encoder_attentions,
+ generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
+ generator_dec_attentions=gen_outputs.decoder_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ with tf.name_scope(self.generator.name):
+ self.generator.build(None)
+ with tf.name_scope(self.question_encoder.name):
+ self.question_encoder.build(None)
+
+
+@add_start_docstrings_to_model_forward(
+ """
+ A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
+ """,
+ RAG_START_DOCSTRING,
+)
+class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss):
+ load_weight_prefix = "tf_rag_token_for_generation_1/rag"
+
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[TFPreTrainedModel] = None,
+ generator: Optional[TFPreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None,
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+
+ super().__init__(config)
+
+ # instantiate model
+ self.rag = TFRagModel(
+ config=config,
+ question_encoder=question_encoder,
+ generator=generator,
+ retriever=retriever,
+ load_weight_prefix=self.load_weight_prefix,
+ name="rag",
+ )
+
+ def set_retriever(self, retriever: RagRetriever):
+ self.rag.retriever = retriever
+
+ # Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_bart.py
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ doc_scores=None,
+ n_docs=None,
+ **kwargs,
+ ):
+ if past_key_values is not None:
+ # if past is defined use only last decoder_input_ids
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
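+ # input_ids is deliberately None below: during generation the encoder runs once on
+ # context_input_ids and its outputs are fed back in via encoder_outputs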
+ return {
+ "input_ids": None,
+ "encoder_outputs": encoder_outputs,
+ "doc_scores": doc_scores,
+ "context_attention_mask": attention_mask,
+ "decoder_input_ids": decoder_input_ids,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ "do_marginalize": True,
+ "n_docs": n_docs,
+ }
+
+ @property
+ def retriever(self):
+ return self.rag.retriever
+
+ @property
+ def generator(self):
+ return self.rag.generator
+
+ @property
+ def question_encoder(self):
+ return self.rag.question_encoder
+
+ @staticmethod
+ def _gather_beams(nested, beam_indices, batch_axis=0):
+ """
+ RAG-specific `_gather_beams`: gathers the beam slices indexed by beam_indices into new beam array. If the
+ nested tensor has a shape mismatch with the beam indices, it means it is the cache. In that case, it isolates
+ and takes care of the extra dimension for `n_docs`.
+ """
+
+ def gather_fn(tensor):
+ is_rag_cache = tensor.shape[0] != beam_indices.shape[0]
+ if is_rag_cache:
+ n_docs = tensor.shape[0] // beam_indices.shape[0]
+ batch_size = beam_indices.shape[0]
+ # reshapes into (batch size, num beams, n_docs, ...), the cache format expected by RAG
+ tensor = tf.reshape(tensor, (batch_size, -1, n_docs, *tensor.shape[2:]))
+
+ gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1)
+
+ if is_rag_cache:
+ # reshapes back into the shape expected by beam search
+ gathered_tensor = tf.reshape(gathered_tensor, (batch_size * n_docs, -1, *gathered_tensor.shape[3:]))
+
+ return gathered_tensor
+
+ return tf.nest.map_structure(gather_fn, nested)
+
+ def marginalize(self, seq_logits, doc_scores, n_docs=None):
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # RAG-token marginalization
+ seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
+ seq_logprobs = tf.reshape(seq_logprobs, [seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]])
+ doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)
+ doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)
+ doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # twice
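+ # seq_logprobs: (batch_size, n_docs, seq_len, vocab_size); doc_logprobs broadcasts as
+ # (batch_size, n_docs, 1, 1). Adding the log-probabilities and reducing with logsumexp over
+ # the document axis computes log sum_d p(d|x) p(y_i|x, d, y_<i).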
+ log_prob_sum = seq_logprobs + doc_logprobs
+ return tf.reduce_logsumexp(log_prob_sum, axis=1)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None,
+ doc_scores: np.ndarray | tf.Tensor | None = None,
+ context_input_ids: np.ndarray | tf.Tensor | None = None,
+ context_attention_mask: np.ndarray | tf.Tensor | None = None,
+ use_cache: bool | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ output_retrieved: bool | None = None,
+ n_docs: int | None = None,
+ do_marginalize: bool | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ reduce_loss: bool | None = None,
+ return_dict: bool | None = None,
+ training: bool = False,
+ **kwargs, # needs kwargs for generation
+ ) -> TFRetrievAugLMMarginOutput:
+ r"""
+ do_marginalize (`bool`, *optional*):
+ If `True`, the logits are marginalized over all documents by making use of
+ `torch.nn.functional.log_softmax`.
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the cross entropy classification loss according to the Rag-Token model formulation. See
+ https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about the Rag-Token formulation. Indices should
+ be in `[0, ..., config.vocab_size - 1]`.
+ reduce_loss (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum`
+ operation.
+ kwargs (`Dict[str, Any]`, *optional*, defaults to `{}`):
+ Legacy dictionary, which is required so that model can use *generate()* function.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import AutoTokenizer, RagRetriever, TFRagTokenForGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True)
+
+ >>> input_dict = tokenizer.prepare_seq2seq_batch(
+ ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
+ ... )
+ >>> outputs = model(input_dict, output_retrieved=True)
+
+ >>> # or use retriever separately
+ >>> # 1. Encode
+ >>> input_ids = input_dict["input_ids"]
+ >>> question_hidden_states = model.question_encoder(input_ids)[0]
+ >>> # 2. Retrieve
+ >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
+ >>> doc_scores = tf.squeeze(
+ ... tf.matmul(
+ ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True
+ ... ),
+ ... axis=1,
+ ... )
+ >>> # 3. Forward to generator
+ >>> outputs = model(
+ ... inputs=None,
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... decoder_input_ids=input_dict["labels"],
+ ... )
+
+ >>> # or directly generate
+ >>> generated = model.generate(
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... )
+ >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
+ ```"""
+
+ assert (
+ "decoder_cached_states" not in kwargs
+ ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py
+
+ do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize
+ reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
+
+ if labels is not None:
+ if decoder_input_ids is None:
+ decoder_input_ids = labels
+ use_cache = False
+
+ outputs = self.rag(
+ input_ids,
+ attention_mask=attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ doc_scores=doc_scores,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_retrieved=output_retrieved,
+ n_docs=n_docs,
+ training=training,
+ )
+
+ loss = None
+ logits = outputs.logits
+ if labels is not None:
+ assert decoder_input_ids is not None
+ loss = self.get_nll(
+ outputs.logits,
+ outputs.doc_scores,
+ labels,
+ reduce_loss=reduce_loss,
+ epsilon=self.config.label_smoothing,
+ n_docs=n_docs,
+ )
+
+ if do_marginalize:
+ logits = self.marginalize(logits, outputs.doc_scores, n_docs)
+
+ return TFRetrievAugLMMarginOutput(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ doc_scores=outputs.doc_scores,
+ context_input_ids=outputs.context_input_ids,
+ context_attention_mask=outputs.context_attention_mask,
+ retrieved_doc_embeds=outputs.retrieved_doc_embeds,
+ retrieved_doc_ids=outputs.retrieved_doc_ids,
+ question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
+ question_enc_hidden_states=outputs.question_enc_hidden_states,
+ question_enc_attentions=outputs.question_enc_attentions,
+ generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
+ generator_enc_hidden_states=outputs.generator_enc_hidden_states,
+ generator_enc_attentions=outputs.generator_enc_attentions,
+ generator_dec_hidden_states=outputs.generator_dec_hidden_states,
+ generator_dec_attentions=outputs.generator_dec_attentions,
+ )
+
+ def generate(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: tf.Tensor | None = None,
+ context_input_ids=None,
+ context_attention_mask=None,
+ doc_scores=None,
+ n_docs=None,
+ generation_config=None,
+ logits_processor=TFLogitsProcessorList(),
+ **kwargs,
+ ):
+ """
+ Implements TFRAG token decoding.
+
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The sequence used as a prompt for the generation. If `input_ids` is not passed, then
+ `context_input_ids` has to be provided.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
+ forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
+ context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+
+ If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
+ forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+ Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
+ `question_encoder_last_hidden_state`.
+
+ If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the
+ forward pass. `doc_scores` can be computed via `question_encoder_last_hidden_state` and `retrieved_doc_embeds`.
+            n_docs (`int`, *optional*, defaults to `config.n_docs`):
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+                `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`TFLogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and a
+ model's config. If a logit processor is passed that is already created with the arguments or a model's
+                config, an error is thrown.
+ kwargs (`Dict[str, Any]`, *optional*):
+                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model.
+
+ Return:
+ `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The
+ second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early
+ due to the `eos_token_id`.
+ """
+ # Handle `generation_config` and kwargs that might update it
+ if generation_config is None:
+ generation_config = self.generation_config
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
+
+ # set default parameters
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+
+ # retrieve docs
+ if self.retriever is not None and context_input_ids is None:
+ question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
+ out = self.retriever(
+ input_ids,
+ question_hidden_states.numpy().astype(np.float32),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="tf",
+ )
+ context_input_ids, context_attention_mask, retrieved_doc_embeds = (
+ out["context_input_ids"],
+ out["context_attention_mask"],
+ out["retrieved_doc_embeds"],
+ )
+
+ context_input_ids = tf.cast(context_input_ids, tf.int32)
+ context_attention_mask = tf.cast(context_attention_mask, tf.int32)
+ retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
+
+ # compute doc_scores
+ doc_scores = tf.matmul(
+ tf.expand_dims(question_hidden_states, axis=1), retrieved_doc_embeds, transpose_b=True
+ )
+ doc_scores = tf.squeeze(doc_scores, axis=1)
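+            # Shape walk-through (illustrative, with assumed dimension names):
+            # question_hidden_states is (batch_size, d); expanding to (batch_size, 1, d)
+            # and multiplying with retrieved_doc_embeds (batch_size, n_docs, d) transposed
+            # gives (batch_size, 1, n_docs); the squeeze yields (batch_size, n_docs),
+            # i.e. one inner-product score per retrieved document.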
+
+ assert (context_input_ids.shape[0] % n_docs) == 0, (
+ f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is"
+ f" {context_input_ids.shape[0]}."
+ )
+
+ batch_size = context_input_ids.shape[0] // n_docs
+
+ encoder = self.rag.generator.get_encoder()
+ encoder_outputs = encoder(
+ input_ids=context_input_ids,
+ attention_mask=context_attention_mask,
+ output_attentions=generation_config.output_attentions,
+ output_hidden_states=generation_config.output_hidden_states,
+ return_dict=True,
+ )
+
+ decoder_input_ids = tf.fill(
+ (batch_size * generation_config.num_beams, 1),
+ tf.cast(generation_config.decoder_start_token_id, tf.int32),
+ )
+ last_hidden_state = encoder_outputs["last_hidden_state"]
+
+ def extend_enc_output(tensor, num_beams=None):
+ """
+ Broadcast tensor with `num_beams` replica, with correct order Input: tensor of shape (batch_size*n_docs ,
+ d) Output: tensor of shape (batch_size*num_beams*n_docs , d)
+ """
+
+ # expand batch_size & num_beam dimensions
+ d_shape_list = tensor.shape[1:]
+
+ # split n_docs dimensions
+ new_shape = (batch_size, 1, n_docs) + d_shape_list
+ tensor = tf.reshape(tensor, new_shape)
+
+ # repeat same last hidden states over `num_beams` dimension
+ new_shape = (batch_size, num_beams, n_docs) + d_shape_list
+ tensor = tf.broadcast_to(tensor, new_shape)
+
+ # merge `batch_size`, `num_beams`, `num_docs` dims again
+ new_shape = (batch_size * num_beams * n_docs,) + d_shape_list
+ return tf.reshape(tensor, new_shape)
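+        # Worked example (hypothetical sizes, for illustration only): with batch_size=2,
+        # n_docs=5 and num_beams=3, a (10, d) tensor is reshaped to (2, 1, 5, d),
+        # broadcast to (2, 3, 5, d), and flattened to (30, d), so every beam sees the
+        # same n_docs encoder states in the original order.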
+
+ # correctly extend last_hidden_state and attention mask
+ context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams)
+ encoder_outputs["last_hidden_state"] = extend_enc_output(
+ last_hidden_state, num_beams=generation_config.num_beams
+ )
+
+ doc_scores = tf.repeat(doc_scores, generation_config.num_beams, axis=0)
+
+ # define start_len & additional parameters
+ model_kwargs["doc_scores"] = doc_scores
+ model_kwargs["encoder_outputs"] = encoder_outputs
+ model_kwargs["attention_mask"] = context_attention_mask
+ model_kwargs["n_docs"] = n_docs
+
+ pre_processor = self._get_logits_processor(
+ generation_config=generation_config,
+ input_ids_seq_length=tf.shape(decoder_input_ids)[-1],
+ logits_processor=logits_processor,
+ )
+
+ if generation_config.num_beams == 1:
+ return self.greedy_search(
+ input_ids=decoder_input_ids,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ logits_processor=pre_processor,
+ output_attentions=generation_config.output_attentions,
+ output_hidden_states=generation_config.output_hidden_states,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ **model_kwargs,
+ )
+ elif generation_config.num_beams > 1:
+ if generation_config.num_beams < generation_config.num_return_sequences:
+ raise ValueError(
+ "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
+ f" num_return_sequences, got {generation_config.num_beams} and"
+ f" {generation_config.num_return_sequences} (respectivelly)"
+ )
+
+ def unflatten_beam_dim(tensor):
+ """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
+ shape = shape_list(tensor)
+ return tf.reshape(tensor, [-1, generation_config.num_beams] + shape[1:])
+
+ decoder_input_ids = unflatten_beam_dim(decoder_input_ids)
+ model_kwargs["attention_mask"] = unflatten_beam_dim(model_kwargs["attention_mask"])
+ model_kwargs["encoder_outputs"]["last_hidden_state"] = unflatten_beam_dim(
+ model_kwargs["encoder_outputs"]["last_hidden_state"]
+ )
+
+ return self.beam_search(
+ input_ids=decoder_input_ids,
+ max_length=generation_config.max_length,
+ pad_token_id=generation_config.pad_token_id,
+ eos_token_id=generation_config.eos_token_id,
+ logits_processor=pre_processor,
+ output_attentions=generation_config.output_attentions,
+ output_hidden_states=generation_config.output_hidden_states,
+ output_scores=generation_config.output_scores,
+ return_dict_in_generate=generation_config.return_dict_in_generate,
+ **model_kwargs,
+ )
+ else:
+ raise ValueError(
+ f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {generation_config.num_beams}"
+ )
+
+ def get_input_embeddings(self):
+ return self.rag.generator.get_input_embeddings()
+
+ def get_output_embeddings(self):
+ return self.rag.generator.get_output_embeddings()
+
+ # Adapted from tf_t5's & tf_bart's _shift_right
+ def shift_tokens_right(self, input_ids, start_token_id=None):
+ """Shift input ids one token to the right, and pad with start_token_id"""
+
+ if start_token_id is None:
+ start_token_id = self.generator.config.decoder_start_token_id
+ assert start_token_id is not None, (
+ "self.generator.config.decoder_start_token_id has to be defined. In Rag we commonly use Bart as"
+ " generator, see Bart docs for more information"
+ )
+
+ pad_token_id = self.generator.config.pad_token_id
+ assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
+
+ start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.cast(start_token_id, input_ids.dtype))
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
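+        # Illustrative example (values assumed for this sketch): with start_token_id=2
+        # and input_ids [[5, 6, 7]], the concat above yields [[2, 5, 6]]; the last token
+        # is dropped and the start token is prepended.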
+
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids = tf.where(
+ shifted_input_ids == -100,
+ tf.fill(shape_list(shifted_input_ids), tf.cast(pad_token_id, input_ids.dtype)),
+ shifted_input_ids,
+ )
+
+ # "Verify that `labels` has only positive values and -100"
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, shifted_input_ids.dtype))
+
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
+ with tf.control_dependencies([assert_gte0]):
+ shifted_input_ids = tf.identity(shifted_input_ids)
+
+ return shifted_input_ids
+
+ # nll stands for 'negative log likelihood'
+ def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+        # shift tokens left (as in the original PyTorch version)
+
+ target = tf.concat(
+ [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))],
+ axis=1,
+ )
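+        # e.g. (illustrative values): target [[bos, a, b]] becomes [[a, b, pad]],
+        # aligning each position's label with the logit that predicts it.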
+ rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
+ loss = self.hf_compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss)
+
+ return loss
+
+    # Adapted from modeling_tf_bart; adds smooth_loss to match the PyTorch version
+ def hf_compute_loss(self, labels, y_pred, smooth_epsilon=0.0, from_logits=True, reduce_loss=False):
+ """CrossEntropyLoss that ignores pad tokens"""
+ # Matt: As written, this loss is not XLA-compatible, but it's doing some very weird things
+ # and I don't feel comfortable converting it.
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(
+ from_logits=True,
+ reduction=keras.losses.Reduction.SUM,
+ )
+
+ if from_logits is False: # convert to logits
+ eps = 1e-9
+ y_pred = tf.clip_by_value(y_pred, clip_value_min=eps, clip_value_max=1 - eps)
+ y_pred = tf.math.log(y_pred)
+
+ logits = y_pred
+ melted_labels = tf.reshape(labels, (-1,))
+ active_loss = tf.not_equal(melted_labels, self.config.generator.pad_token_id)
+
+ reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, logits.shape[2])), active_loss)
+ labels = tf.boolean_mask(melted_labels, active_loss)
+ nll_loss = loss_fn(labels, reduced_logits)
+
+ smooth_loss = -tf.reduce_sum(reduced_logits, axis=-1)
+ smooth_loss = tf.reduce_sum(smooth_loss) # sum and squeeze like torch
+ eps_i = smooth_epsilon / reduced_logits.shape[-1]
+
+ loss = (1.0 - smooth_epsilon) * nll_loss + eps_i * smooth_loss
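+        # Explanatory note (not in the original code): this mirrors label-smoothed NLL.
+        # With eps = smooth_epsilon and V = vocab size, loss = (1 - eps) * NLL +
+        # (eps / V) * smooth, where smooth is the negative sum of the (marginalized)
+        # log-probabilities at the unmasked target positions.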
+
+ return loss
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "rag", None) is not None:
+ with tf.name_scope(self.rag.name):
+ self.rag.build(None)
+
+
+@add_start_docstrings_to_model_forward(
+ """
+ A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
+ """,
+ RAG_START_DOCSTRING,
+)
+class TFRagSequenceForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss):
+ load_weight_prefix = "tf_rag_sequence_for_generation_1/rag"
+
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ question_encoder: Optional[TFPreTrainedModel] = None,
+ generator: Optional[TFPreTrainedModel] = None,
+ retriever: Optional[RagRetriever] = None,
+ **kwargs,
+ ):
+ assert config is not None or (
+ question_encoder is not None and generator is not None
+ ), "Either a configuration or an encoder and a generator has to be provided."
+
+ if config is None:
+ config = RagConfig.from_question_encoder_generator_configs(
+ question_encoder.config, generator.config, **kwargs
+ )
+
+ super().__init__(config)
+
+ # instantiate model
+ self.rag = TFRagModel(
+ config=config,
+ question_encoder=question_encoder,
+ generator=generator,
+ retriever=retriever,
+ load_weight_prefix=self.load_weight_prefix,
+ name="rag",
+ )
+
+ def set_retriever(self, retriever: RagRetriever):
+ self.rag.retriever = retriever
+
+ @property
+ def retriever(self):
+ return self.rag.retriever
+
+ @property
+ def generator(self):
+ return self.rag.generator
+
+ @property
+ def question_encoder(self):
+ return self.rag.question_encoder
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ doc_scores: np.ndarray | tf.Tensor | None = None,
+ context_input_ids: np.ndarray | tf.Tensor | None = None,
+ context_attention_mask: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_retrieved: Optional[bool] = None,
+ n_docs: Optional[int] = None,
+ exclude_bos_score: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ reduce_loss: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ **kwargs, # needs kwargs for generation
+ ) -> Union[Tuple[tf.Tensor], TFRetrievAugLMMarginOutput]:
+ r"""
+ exclude_bos_score (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
+ the loss.
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the cross entropy classification loss according to Rag-Sequence model formulation. See
+ https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about Rag-Sequence formulation. Indices should
+ be in `[0, ..., config.vocab_size - 1]`.
+ reduce_loss (`bool`, *optional*):
+ Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum`
+ operation.
+            kwargs (`Dict[str, Any]`, *optional*, defaults to `{}`):
+                Legacy dictionary, which is required so that the model can use the *generate()* function.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, RagRetriever, TFRagSequenceForGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
+ >>> retriever = RagRetriever.from_pretrained(
+ ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
+ ... )
+ >>> # initialize with RagRetriever to do everything in one forward call
+ >>> model = TFRagSequenceForGeneration.from_pretrained(
+ ... "facebook/rag-sequence-nq", retriever=retriever, from_pt=True
+ ... )
+
+ >>> input_dict = tokenizer.prepare_seq2seq_batch(
+ ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
+ ... )
+ >>> outputs = model(input_dict, output_retrieved=True)
+
+ >>> # or use retriever separately
+ >>> # 1. Encode
+ >>> input_ids = input_dict["input_ids"]
+ >>> question_hidden_states = model.question_encoder(input_ids)[0]
+ >>> # 2. Retrieve
+ >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
+ >>> doc_scores = tf.squeeze(
+ ... tf.matmul(
+ ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True
+ ... ),
+ ... axis=1,
+ ... )
+ >>> # 3. Forward to generator
+ >>> outputs = model(
+ ... inputs=None,
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... decoder_input_ids=input_dict["labels"],
+ ... )
+
+ >>> # or directly generate
+ >>> generated = model.generate(
+ ... context_input_ids=docs_dict["context_input_ids"],
+ ... context_attention_mask=docs_dict["context_attention_mask"],
+ ... doc_scores=doc_scores,
+ ... )
+ >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
+ ```"""
+
+ assert (
+ "decoder_cached_states" not in kwargs
+ ), "Please use past_key_values to cache intermediate outputs" # from modeling_tf_bart.py
+
+        exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
+        reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
+
+ if labels is not None:
+ if decoder_input_ids is None:
+ decoder_input_ids = labels
+ use_cache = False
+
+ outputs = self.rag(
+ input_ids,
+ attention_mask=attention_mask,
+ encoder_outputs=encoder_outputs,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ context_input_ids=context_input_ids,
+ context_attention_mask=context_attention_mask,
+ doc_scores=doc_scores,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_retrieved=output_retrieved,
+ n_docs=n_docs,
+ training=training,
+ )
+
+ loss = None
+ if labels is not None:
+ loss = self.get_nll(
+ outputs.logits,
+ outputs.doc_scores,
+ labels,
+ reduce_loss=reduce_loss,
+ epsilon=self.config.label_smoothing,
+ n_docs=n_docs,
+ )
+
+ return TFRetrievAugLMMarginOutput(
+ loss=loss,
+ logits=outputs.logits,
+ doc_scores=outputs.doc_scores,
+ past_key_values=outputs.past_key_values,
+ context_input_ids=outputs.context_input_ids,
+ context_attention_mask=outputs.context_attention_mask,
+ retrieved_doc_embeds=outputs.retrieved_doc_embeds,
+ retrieved_doc_ids=outputs.retrieved_doc_ids,
+ question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
+ question_enc_hidden_states=outputs.question_enc_hidden_states,
+ question_enc_attentions=outputs.question_enc_attentions,
+ generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
+ generator_enc_hidden_states=outputs.generator_enc_hidden_states,
+ generator_enc_attentions=outputs.generator_enc_attentions,
+ generator_dec_hidden_states=outputs.generator_dec_hidden_states,
+ generator_dec_attentions=outputs.generator_dec_attentions,
+ )
+
+ def get_nll(
+ self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
+ ):
+ # shift tokens left
+ target = tf.concat(
+ [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))],
+ axis=1,
+ )
+
+ # bos_token_id is None for T5
+ bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ equal_bos_token_id_all = tf.reduce_all(tf.equal(target[:, 0], bos_token_id))
+ use_bos = bos_token_id is not None and equal_bos_token_id_all
+
+ def _mask_pads(ll, smooth_obj):
+ pad_mask = tf.equal(target, tf.cast(self.config.generator.pad_token_id, target.dtype))
+ if tf.reduce_any(pad_mask):
+ ll = tf.where(pad_mask, 0.0, ll)
+ smooth_obj = tf.where(pad_mask, 0.0, smooth_obj)
+ return tf.squeeze(ll, axis=-1), tf.squeeze(smooth_obj, axis=-1)
+
+        # seq_logits.shape = (batch_size * n_docs, tgt_len, vocabs)
+ seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
+ seq_logprobs = tf.reshape(
+ seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1])
+ ) # (batch_size, n_docs, tgt_len, vocabs)
+ doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)
+ doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)
+ doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # done twice to get 4-D
+
+ # RAG-sequence marginalization
+ first_token_scores = seq_logprobs[:, :, :1, :]
+ second_token_scores = seq_logprobs[:, :, 1:2, :]
+ remainder = seq_logprobs[:, :, 2:, :]
+ rag_logprobs = tf.concat([first_token_scores, second_token_scores + doc_logprobs, remainder], axis=2)
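+        # Explanatory note (not in the original code): adding the document log-probability
+        # once, at the second token (the first may be excluded via `exclude_bos_score`),
+        # is equivalent to adding log p(doc) to the full sequence score, since token
+        # scores are summed over the sequence before the logsumexp over documents below.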
+
+ # calculate loss
+ target = tf.expand_dims(target, axis=1) # n_docs dimension
+ target = tf.expand_dims(target, axis=-1) # logits dimension
+ target = tf.repeat(target, n_docs, axis=1)
+ assert len(target.shape) == len(rag_logprobs.shape)
+
+ # last-axis gathering only - use 2D-reshape-trick for Torch's style nD gathering
+ def torch_gather(param, id_tensor):
+ # 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather
+ def gather2d(target, id_tensor):
+ idx = tf.stack([tf.range(tf.shape(id_tensor)[0], dtype=id_tensor.dtype), id_tensor[:, 0]], axis=-1)
+ result = tf.gather_nd(target, idx)
+ return tf.expand_dims(result, axis=-1)
+
+ target = tf.reshape(param, (-1, param.shape[-1])) # reshape 2D
+ target_shape = id_tensor.shape
+
+ id_tensor = tf.reshape(id_tensor, (-1, 1)) # also 2D-index
+ result = gather2d(target, id_tensor)
+ return tf.reshape(result, target_shape)
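+        # Illustrative example (hypothetical values): for param [[0.1, 0.2], [0.3, 0.4]]
+        # and id_tensor [[1], [0]], torch_gather returns [[0.2], [0.3]]: a last-axis
+        # torch.gather emulated with tf.gather_nd on a 2D reshape.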
+
+ ll = torch_gather(rag_logprobs, id_tensor=target)
+ smooth_obj = tf.reduce_sum(rag_logprobs, axis=-1, keepdims=True) # total sum of all (normalised) logits
+
+ ll, smooth_obj = _mask_pads(ll, smooth_obj)
+
+ # sum over tokens, exclude bos while scoring
+ if exclude_bos_score and use_bos:
+ ll = tf.reduce_sum(ll[:, :, 1:], axis=2)
+ else:
+ ll = tf.reduce_sum(ll, axis=2)
+
+ smooth_obj = tf.reduce_sum(smooth_obj, axis=2)
+ ll = tf.math.reduce_logsumexp(ll, axis=1) # logsumexp over docs
+ smooth_obj = tf.math.reduce_logsumexp(smooth_obj, axis=1)
+
+ nll_loss = -ll
+ smooth_loss = -smooth_obj
+
+ if reduce_loss:
+ nll_loss = tf.reduce_sum(nll_loss)
+ smooth_loss = tf.reduce_sum(smooth_loss)
+
+ eps_i = epsilon / rag_logprobs.shape[-1]
+ loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
+ return loss
+
+ def generate(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: tf.Tensor | None = None,
+ context_input_ids=None,
+ context_attention_mask=None,
+ doc_scores=None,
+ do_deduplication=None, # defaults to True
+ num_return_sequences=None, # defaults to 1
+ num_beams=None, # defaults to 1
+ n_docs=None,
+ **model_kwargs,
+ ):
+ """
+        Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`] documentation
+        for more information on how to set other generate input parameters.
+
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The sequence used as a prompt for the generation. If `input_ids` is not passed, then
+ `context_input_ids` has to be provided.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+                - 1 for tokens that are **not masked**,
+                - 0 for tokens that are **masked**.
+
+                [What are attention masks?](../glossary#attention-mask)
+ context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+                Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
+ retriever.
+ context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
+ Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
+                retriever. If the model is not initialized with a `retriever` or `input_ids` is not given,
+ `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are
+ returned by [`~RagRetriever.__call__`].
+ doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
+                Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
+                `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or
+ `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are
+ returned by [`~RagRetriever.__call__`].
+ do_deduplication (`bool`, *optional*):
+ Whether or not to deduplicate the generations from different context documents for a given input. Has
+ to be set to `False` if used while training with distributed backend.
+ num_return_sequences(`int`, *optional*, defaults to 1):
+ The number of independently computed returned sequences for each element in the batch. Note that this
+                is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function,
+ where we set `num_return_sequences` to `num_beams`.
+ num_beams (`int`, *optional*, defaults to 1):
+ Number of beams for beam search. 1 means no beam search.
+            n_docs (`int`, *optional*, defaults to `config.n_docs`):
+ Number of documents to retrieve and/or number of documents for which to generate an answer.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional kwargs will be passed to [`~generation.GenerationMixin.generate`]
+
+ Return:
+ `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The
+ second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early
+ due to the `eos_token_id`.
+ """
+
+ n_docs = n_docs if n_docs is not None else self.config.n_docs
+ do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
+ num_doc_return_sequences = (
+ num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
+ )
+ num_beams = num_beams if num_beams is not None else self.config.num_beams
+
+ assert (
+ input_ids is not None or context_input_ids is not None
+ ), " At least one of input_ids or context_input_ids must be given"
+
+ if self.retriever is not None and context_input_ids is None:
+ question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
+ context_input_ids = self.retriever(
+ input_ids,
+ question_hidden_states.numpy(),
+ prefix=self.generator.config.prefix,
+ n_docs=n_docs,
+ return_tensors="tf",
+ )["context_input_ids"]
+
+ hypos = []
+ model_kwargs["num_beams"] = num_beams
+ model_kwargs["num_return_sequences"] = num_beams # put here so that not confused with num_doc_return_sequences
+ model_kwargs["attention_mask"] = None
+
+ batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
+
+ for index in range(batch_size):
+ # first, generate beams from documents:
+ generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len)
+
+ output_sequences = self.generator.generate(
+ generator_input_ids,
+ **model_kwargs,
+ ) # n_docs * n_beam, tgt_len
+ if do_deduplication:
+ # do_deduplication -- for TF, work on Eager mode only!
+ output_sequences = tf.stack(list({str(k.numpy().tolist()): k for k in output_sequences}.values()))
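+                # Deduplication trick (explanatory note): keying a dict by the string form
+                # of each sequence keeps one tensor per unique hypothesis; the .numpy()
+                # call is why this branch only works in eager mode.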
+
+ num_candidates = output_sequences.shape[
+ 0
+ ] # after deduplication, this number can be less than n_docs*n_beam
+
+ # then, run model forwards to get nll scores:
+ if input_ids is not None:
+ new_input_ids = tf.tile(input_ids[index : index + 1], (num_candidates, 1))
+ outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
+ else: # input_ids is None, need context_input_ids/mask and doc_scores
+ assert context_attention_mask is not None, (
+ "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you"
+ " can set a retriever using the `set_retriever(...)` function."
+ )
+ assert doc_scores is not None, (
+ "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a"
+ " retriever using the `set_retriever(...)` function."
+ )
+
+ individual_input_ids = tf.tile(
+ generator_input_ids, (num_candidates, 1)
+ ) # (num_candidates*n_docs, max_len)
+
+ individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
+ individual_attention_mask = tf.tile(individual_attention_mask, (num_candidates, 1))
+
+ individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs]
+ individual_doc_scores = tf.tile(individual_doc_scores, (num_candidates, 1)) # [num_candidates, n_docs]
+
+ outputs = self(
+ input_ids=None,
+ context_input_ids=individual_input_ids,
+ context_attention_mask=individual_attention_mask,
+ doc_scores=individual_doc_scores,
+ labels=output_sequences,
+ exclude_bos_score=True,
+ )
+
+ top_cand_inds = tf.math.top_k((-outputs["loss"]), k=num_doc_return_sequences)[1]
+
+ # add hypothesis
+ hypos.append(tf.gather(output_sequences, top_cand_inds))
+
+ return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)
+
+ @staticmethod
+ def _cat_and_pad(tensors, pad_token_id):
+        # used by generate(): `tensors` is a list of (candidates, len) tensors; len varies across the batch
+
+ # Initialize padded tensor with shape ( all_candidates , max_candidate_length ),
+ # where all_candidates counted from all inputs
+ new_shape = sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])
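+        # e.g. (illustrative, assumed shapes): tensors of shapes (2, 3) and (1, 5) give
+        # new_shape (3, 5); shorter rows are right-padded with pad_token_id below.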
+ output = tf.fill(new_shape, pad_token_id)
+
+ # Normal tensor doesn't support slice assignment, so we need tf.Variable
+ output = tf.Variable(output)
+
+ # Assign, and then convert back to tensor
+ ind = 0
+ for t in tensors:
+ output[ind : ind + t.shape[0], : t.shape[1]].assign(t)
+ ind += t.shape[0]
+
+ output = tf.convert_to_tensor(output)
+ return tf.cast(output, tensors[0][0][0].dtype)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "rag", None) is not None:
+ with tf.name_scope(self.rag.name):
+ self.rag.build(None)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c36cb750cfa4e6273de0a8a2646236ee14b516d1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__init__.py
@@ -0,0 +1,53 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_vit_msn"] = [
+ "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ViTMSNModel",
+ "ViTMSNForImageClassification",
+ "ViTMSNPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_vit_msn import (
+ VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ViTMSNForImageClassification,
+ ViTMSNModel,
+ ViTMSNPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c5f83fa23d080f39bf548ce392eff2aad3397f1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/configuration_vit_msn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/configuration_vit_msn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3b3cd7cc1a270e6d764d5caf3d16d8e47ed402d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/configuration_vit_msn.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/convert_msn_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/convert_msn_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5be05295c830533170e3a15e71f3140474e21e64
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/convert_msn_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/modeling_vit_msn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/modeling_vit_msn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d9d3c5c9dabdb34601a46d2fd6b55378f242769
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/__pycache__/modeling_vit_msn.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/configuration_vit_msn.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/configuration_vit_msn.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ee05e3c393be0ba601e709e11e428c988653b2f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/configuration_vit_msn.py
@@ -0,0 +1,118 @@
+# coding=utf-8
+# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" ViT MSN model configuration"""
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
+ # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
+}
+
+
+class ViTMSNConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`ViTMSNModel`]. It is used to instantiate a ViT
+ MSN model according to the specified arguments, defining the model architecture. Instantiating a configuration with
+ the defaults will yield a similar configuration to that of the ViT
+ [facebook/vit_msn_base](https://huggingface.co/facebook/vit_msn_base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+
+ Example:
+
+ ```python
+ >>> from transformers import ViTMSNModel, ViTMSNConfig
+
+ >>> # Initializing a ViT MSN vit-msn-base style configuration
+    >>> configuration = ViTMSNConfig()
+
+ >>> # Initializing a model from the vit-msn-base style configuration
+ >>> model = ViTMSNModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "vit_msn"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-06,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ qkv_bias=True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.qkv_bias = qkv_bias
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/convert_msn_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/convert_msn_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..899c74f183205e9fdc18984a1f15e877bc64fe31
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/convert_msn_to_pytorch.py
@@ -0,0 +1,245 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert ViT MSN checkpoints from the original repository: https://github.com/facebookresearch/msn"""
+
+import argparse
+import json
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
+from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+
+
+torch.set_grad_enabled(False)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config, base_model=False):
+ rename_keys = []
+ for i in range(config.num_hidden_layers):
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
+ rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
+ rename_keys.append(
+ (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
+ )
+ rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
+ rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
+ rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
+ rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
+ rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
+ rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
+ rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
+
+ # projection layer + position embeddings
+ rename_keys.extend(
+ [
+ ("module.cls_token", "vit.embeddings.cls_token"),
+ ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
+ ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
+ ("module.pos_embed", "vit.embeddings.position_embeddings"),
+ ]
+ )
+
+ if base_model:
+ # layernorm + pooler
+ rename_keys.extend(
+ [
+ ("module.norm.weight", "layernorm.weight"),
+ ("module.norm.bias", "layernorm.bias"),
+ ]
+ )
+
+ # if just the base model, we should remove "vit" from all keys that start with "vit"
+ rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
+ else:
+ # layernorm + classification head
+ rename_keys.extend(
+ [
+ ("norm.weight", "vit.layernorm.weight"),
+ ("norm.bias", "vit.layernorm.bias"),
+ ("head.weight", "classifier.weight"),
+ ("head.bias", "classifier.bias"),
+ ]
+ )
+
+ return rename_keys
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config, base_model=False):
+ for i in range(config.num_hidden_layers):
+ if base_model:
+ prefix = ""
+ else:
+ prefix = "vit."
+ # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
+ in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
+ in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+ : config.hidden_size, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+ config.hidden_size : config.hidden_size * 2, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
+ config.hidden_size : config.hidden_size * 2
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+ -config.hidden_size :, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
+
+
+def remove_classification_head_(state_dict):
+ ignore_keys = ["head.weight", "head.bias"]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def remove_projection_head(state_dict):
+ # projection head is used in the self-supervised pre-training in MSN,
+    # for downstream tasks it's not needed.
+ ignore_keys = [
+ "module.fc.fc1.weight",
+ "module.fc.fc1.bias",
+ "module.fc.bn1.weight",
+ "module.fc.bn1.bias",
+ "module.fc.bn1.running_mean",
+ "module.fc.bn1.running_var",
+ "module.fc.bn1.num_batches_tracked",
+ "module.fc.fc2.weight",
+ "module.fc.fc2.bias",
+ "module.fc.bn2.weight",
+ "module.fc.bn2.bias",
+ "module.fc.bn2.running_mean",
+ "module.fc.bn2.running_var",
+ "module.fc.bn2.num_batches_tracked",
+ "module.fc.fc3.weight",
+ "module.fc.fc3.bias",
+ ]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
+ config = ViTMSNConfig()
+ config.num_labels = 1000
+
+ repo_id = "datasets/huggingface/label-files"
+ filename = "imagenet-1k-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ if "s16" in checkpoint_url:
+ config.hidden_size = 384
+ config.intermediate_size = 1536
+ config.num_attention_heads = 6
+ elif "l16" in checkpoint_url:
+ config.hidden_size = 1024
+ config.intermediate_size = 4096
+ config.num_hidden_layers = 24
+ config.num_attention_heads = 16
+ config.hidden_dropout_prob = 0.1
+ elif "b4" in checkpoint_url:
+ config.patch_size = 4
+ elif "l7" in checkpoint_url:
+ config.patch_size = 7
+ config.hidden_size = 1024
+ config.intermediate_size = 4096
+ config.num_hidden_layers = 24
+ config.num_attention_heads = 16
+ config.hidden_dropout_prob = 0.1
+
+ model = ViTMSNModel(config)
+
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
+
+ remove_projection_head(state_dict)
+ rename_keys = create_rename_keys(config, base_model=True)
+
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_q_k_v(state_dict, config, base_model=True)
+
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+
+ image = Image.open(requests.get(url, stream=True).raw)
+ image_processor = ViTImageProcessor(
+ size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
+ )
+ inputs = image_processor(images=image, return_tensors="pt")
+
+ # forward pass
+ torch.manual_seed(2)
+ outputs = model(**inputs)
+ last_hidden_state = outputs.last_hidden_state
+
+ # The following Colab Notebook was used to generate these outputs:
+ # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
+ if "s16" in checkpoint_url:
+ expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
+ elif "b16" in checkpoint_url:
+ expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
+ elif "l16" in checkpoint_url:
+ expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
+ elif "b4" in checkpoint_url:
+ expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
+ else:
+ expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
+
+    # verify the expected slice of the last hidden state
+ assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
+
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
+ type=str,
+ help="URL of the checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+
+ args = parser.parse_args()
+ convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/modeling_vit_msn.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/modeling_vit_msn.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b10eb9f2450595b466cc5d5a7e629d665b20c2e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/vit_msn/modeling_vit_msn.py
@@ -0,0 +1,690 @@
+# coding=utf-8
+# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch ViT MSN (masked siamese network) model."""
+
+
+import collections.abc
+import math
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_vit_msn import ViTMSNConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+_CONFIG_FOR_DOC = "ViTMSNConfig"
+_CHECKPOINT_FOR_DOC = "facebook/vit-msn-small"
+VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "facebook/vit-msn-small",
+ # See all ViTMSN models at https://huggingface.co/models?filter=vit_msn
+]
+
+
+class ViTMSNEmbeddings(nn.Module):
+ """
+ Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
+ """
+
+ def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False) -> None:
+ super().__init__()
+
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
+ self.patch_embeddings = ViTMSNPatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
+ resolution images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
+ num_patches = embeddings.shape[1] - 1
+ num_positions = self.position_embeddings.shape[1] - 1
+ if num_patches == num_positions and height == width:
+ return self.position_embeddings
+ class_pos_embed = self.position_embeddings[:, 0]
+ patch_pos_embed = self.position_embeddings[:, 1:]
+ dim = embeddings.shape[-1]
+ patch_window_height = height // self.config.patch_size
+ patch_window_width = width // self.config.patch_size
+ # we add a small number to avoid floating point error in the interpolation
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
+ patch_window_height, patch_window_width = patch_window_height + 0.1, patch_window_width + 0.1
+ patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed,
+ scale_factor=(
+ patch_window_height / math.sqrt(num_positions),
+ patch_window_width / math.sqrt(num_positions),
+ ),
+ mode="bicubic",
+ align_corners=False,
+ )
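+        # Shape sketch (illustrative, assuming a square patch grid): a 224px checkpoint
+        # with patch_size=16 stores 196 patch position embeddings arranged as 14 x 14;
+        # the bicubic interpolation above rescales that grid to the new resolution.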
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
+
+ def forward(
+ self,
+ pixel_values: torch.Tensor,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ interpolate_pos_encoding: bool = False,
+ ) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+
+ if bool_masked_pos is not None:
+ seq_length = embeddings.shape[1]
+ mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
+ # replace the masked visual tokens by mask_tokens
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
+
+ # add the [CLS] token to the embedded patch tokens
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+ # add positional encoding to each token
+ if interpolate_pos_encoding:
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ embeddings = embeddings + self.position_embeddings
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTPatchEmbeddings with ViT->ViTMSN
+class ViTMSNPatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ f" Expected {self.num_channels} but got {num_channels}."
+ )
+ if not interpolate_pos_encoding:
+ if height != self.image_size[0] or width != self.image_size[1]:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model"
+ f" ({self.image_size[0]}*{self.image_size[1]})."
+ )
+ embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
+ return embeddings
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->ViTMSN
+class ViTMSNSelfAttention(nn.Module):
+ def __init__(self, config: ViTMSNConfig) -> None:
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
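+        # Illustrative shapes: (batch, seq_len, all_head_size) -> view ->
+        # (batch, seq_len, num_heads, head_size) -> permute ->
+        # (batch, num_heads, seq_len, head_size), so attention runs per head.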
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
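+ # Illustrative shape sketch (editorial note, not in the upstream file). With hidden
+ # size H, n attention heads and head size d = H // n, `transpose_for_scores` reshapes
+ # (batch, seq, H) into (batch, n, seq, d), so the core computation above is
+ #
+ #     scores = query @ key.transpose(-1, -2) / math.sqrt(d)  # (batch, n, seq, seq)
+ #     probs = scores.softmax(dim=-1)
+ #     context = probs @ value                                # (batch, n, seq, d)
+ #
+ # before the final permute/view restores (batch, seq, H).
+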
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTMSN
+class ViTMSNSelfOutput(nn.Module):
+ """
+ The residual connection is defined in ViTMSNLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: ViTMSNConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTMSN
+class ViTMSNAttention(nn.Module):
+ def __init__(self, config: ViTMSNConfig) -> None:
+ super().__init__()
+ self.attention = ViTMSNSelfAttention(config)
+ self.output = ViTMSNSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads: Set[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
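+ # Illustrative usage sketch (editorial note, not in the upstream file). Pruning is
+ # normally driven through the standard `PreTrainedModel.prune_heads` API, e.g.
+ # removing heads 0 and 1 of the first layer:
+ #
+ #     model = ViTMSNModel(ViTMSNConfig())
+ #     model.prune_heads({0: [0, 1]})
+ #
+ # after which layer 0 attends with two fewer heads and smaller Q/K/V projections.
+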
+
+# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->ViTMSN
+class ViTMSNIntermediate(nn.Module):
+ def __init__(self, config: ViTMSNConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->ViTMSN
+class ViTMSNOutput(nn.Module):
+ def __init__(self, config: ViTMSNConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ hidden_states = hidden_states + input_tensor
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->ViTMSN
+class ViTMSNLayer(nn.Module):
+ """This corresponds to the Block class in the timm implementation."""
+
+ def __init__(self, config: ViTMSNConfig) -> None:
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = ViTMSNAttention(config)
+ self.intermediate = ViTMSNIntermediate(config)
+ self.output = ViTMSNOutput(config)
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), # in ViTMSN, layernorm is applied before self-attention
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection
+ hidden_states = attention_output + hidden_states
+
+ # in ViTMSN, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+
+ # second residual connection is done here
+ layer_output = self.output(layer_output, hidden_states)
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
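+ # Illustrative sketch (editorial note, not in the upstream file). ViTMSNLayer is a
+ # pre-norm Transformer block; for an input x the forward pass above computes
+ #
+ #     x = x + Attention(LayerNorm(x))
+ #     x = x + MLP(LayerNorm(x))
+ #
+ # where MLP is ViTMSNIntermediate followed by ViTMSNOutput, and the second residual
+ # addition happens inside ViTMSNOutput.forward.
+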
+
+# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTMSN
+class ViTMSNEncoder(nn.Module):
+ def __init__(self, config: ViTMSNConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([ViTMSNLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
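+ # Illustrative usage sketch (editorial note, not in the upstream file). Gradient
+ # checkpointing is toggled through the standard PreTrainedModel API and trades
+ # compute for memory by re-computing activations during the backward pass:
+ #
+ #     model = ViTMSNModel.from_pretrained("facebook/vit-msn-small")
+ #     model.gradient_checkpointing_enable()  # sets gradient_checkpointing = True on the encoder
+ #     model.train()
+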
+
+class ViTMSNPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ViTMSNConfig
+ base_model_prefix = "vit"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ # TODO: refer to https://github.com/facebookresearch/msn/blob/main/src/deit.py#L200-#L211
+ # when creating pre-training scripts.
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+VIT_MSN_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ViTMSNConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+VIT_MSN_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
+ for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ interpolate_pos_encoding (`bool`, *optional*):
+ Whether to interpolate the pre-trained position encodings.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ViTMSN Model outputting raw hidden-states without any specific head on top.",
+ VIT_MSN_START_DOCSTRING,
+)
+class ViTMSNModel(ViTMSNPreTrainedModel):
+ def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = ViTMSNEmbeddings(config, use_mask_token=use_mask_token)
+ self.encoder = ViTMSNEncoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> ViTMSNPatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class `PreTrainedModel` for details.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, BaseModelOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, ViTMSNModel
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-msn-small")
+ >>> model = ViTMSNModel.from_pretrained("facebook/vit-msn-small")
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+ >>> with torch.no_grad():
+ ... outputs = model(**inputs)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
+ )
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+
+ if not return_dict:
+ head_outputs = (sequence_output,)
+ return head_outputs + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+# Caution: We don't have the weights for the classification head yet. This class
+# is here for users who are interested in fine-tuning the base model (ViTMSNModel).
+@add_start_docstrings(
+ """
+ ViTMSN Model with an image classification head on top, e.g. for ImageNet.
+ """,
+ VIT_MSN_START_DOCSTRING,
+)
+class ViTMSNForImageClassification(ViTMSNPreTrainedModel):
+ def __init__(self, config: ViTMSNConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.vit = ViTMSNModel(config)
+
+ # Classifier head
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, ImageClassifierOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, ViTMSNForImageClassification
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> torch.manual_seed(2) # doctest: +IGNORE_RESULT
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-msn-small")
+ >>> model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+ >>> with torch.no_grad():
+ ... logits = model(**inputs).logits
+ >>> # model predicts one of the 1000 ImageNet classes
+ >>> predicted_label = logits.argmax(-1).item()
+ >>> print(model.config.id2label[predicted_label])
+ Kerry blue terrier
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.vit(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.classifier(sequence_output[:, 0, :])
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
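+
+
+ # Illustrative fine-tuning sketch (editorial note, not in the upstream file). Since
+ # no classification-head weights are released, the head is freshly initialized when
+ # loading the pretrained backbone, e.g. for a hypothetical 10-class problem:
+ #
+ #     model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small", num_labels=10)
+ #     # `model.vit` carries the pretrained weights; `model.classifier` starts from random weights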
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff14e5b987a789c86f3ca37e11d79afe540a177e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__init__.py
@@ -0,0 +1,78 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"],
+}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_xlm_prophetnet"] = ["XLMProphetNetTokenizer"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_xlm_prophetnet"] = [
+ "XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "XLMProphetNetDecoder",
+ "XLMProphetNetEncoder",
+ "XLMProphetNetForCausalLM",
+ "XLMProphetNetForConditionalGeneration",
+ "XLMProphetNetModel",
+ "XLMProphetNetPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_xlm_prophetnet import (
+ XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ XLMProphetNetDecoder,
+ XLMProphetNetEncoder,
+ XLMProphetNetForCausalLM,
+ XLMProphetNetForConditionalGeneration,
+ XLMProphetNetModel,
+ XLMProphetNetPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e0910912250043df4d01a980c598c86f5b255bc
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35c47e690947d0fcaf91a99358aaa9f04e7c81c2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7472236e740c28151dc7b161931bdabb90001c22
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/tokenization_xlm_prophetnet.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/tokenization_xlm_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f92cf1ad39463bb55c1212d0bd6b4ded456c8039
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/tokenization_xlm_prophetnet.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..88ca83a73226cebf889b0c089fe6784d4c2a2bb9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py
@@ -0,0 +1,185 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" XLM-ProphetNet model configuration"""
+
+
+from typing import Callable, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "microsoft/xprophetnet-large-wiki100-cased": (
+ "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
+ ),
+}
+
+
+class XLMProphetNetConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of an [`XLMProphetNetModel`]. It is used to instantiate
+ an XLMProphetNet model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the XLMProphetNet
+ [microsoft/xprophetnet-large-wiki100-cased](https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for activations inside the fully connected layer.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the XLMProphetNet model. Defines the number of different tokens that can be represented by
+ the `input_ids` passed when calling [`XLMProphetNetModel`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ num_encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ num_encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ num_decoder_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ num_decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ add_cross_attention (`bool`, *optional*, defaults to `True`):
+ Whether cross-attention layers should be added to the model.
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether this is an encoder/decoder model.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+ ngram (`int`, *optional*, defaults to 2):
+ Number of future tokens to predict. Set to 1 to behave like a traditional language model that predicts only
+ the next token.
+ num_buckets (`int`, *optional*, defaults to 32):
+ The number of buckets to use for each attention layer. This is for relative position calculation. See the
+ [T5 paper](https://arxiv.org/abs/1910.10683) for more details.
+ relative_max_distance (`int`, *optional*, defaults to 128):
+ Relative distances greater than this number will be put into the same last bucket. This is for relative
+ position calculation. See the [T5 paper](https://arxiv.org/abs/1910.10683) for more details.
+ disable_ngram_loss (`bool`, *optional*, defaults to `False`):
+ Whether the model should be trained to predict only the next token (i.e., disabling the n-gram loss).
+ eps (`float`, *optional*, defaults to 0.0):
+ Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label
+ smoothing is performed.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
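+
+ Example (illustrative usage sketch):
+
+ ```python
+ >>> from transformers import XLMProphetNetConfig, XLMProphetNetModel
+
+ >>> # Initializing an XLMProphetNet (microsoft/xprophetnet-large-wiki100-cased style) configuration
+ >>> configuration = XLMProphetNetConfig()
+
+ >>> # Initializing a model (with random weights) from that configuration
+ >>> model = XLMProphetNetModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```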
+ """
+
+ model_type = "xlm-prophetnet"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "num_attention_heads": "num_encoder_attention_heads",
+ }
+
+ def __init__(
+ self,
+ activation_dropout: Optional[float] = 0.1,
+ activation_function: Optional[Union[str, Callable]] = "gelu",
+ vocab_size: Optional[int] = 30522,
+ hidden_size: Optional[int] = 1024,
+ encoder_ffn_dim: Optional[int] = 4096,
+ num_encoder_layers: Optional[int] = 12,
+ num_encoder_attention_heads: Optional[int] = 16,
+ decoder_ffn_dim: Optional[int] = 4096,
+ num_decoder_layers: Optional[int] = 12,
+ num_decoder_attention_heads: Optional[int] = 16,
+ attention_dropout: Optional[float] = 0.1,
+ dropout: Optional[float] = 0.1,
+ max_position_embeddings: Optional[int] = 512,
+ init_std: Optional[float] = 0.02,
+ is_encoder_decoder: Optional[bool] = True,
+ add_cross_attention: Optional[bool] = True,
+ decoder_start_token_id: Optional[int] = 0,
+ ngram: Optional[int] = 2,
+ num_buckets: Optional[int] = 32,
+ relative_max_distance: Optional[int] = 128,
+ disable_ngram_loss: Optional[bool] = False,
+ eps: Optional[float] = 0.0,
+ use_cache: Optional[bool] = True,
+ pad_token_id: Optional[int] = 0,
+ bos_token_id: Optional[int] = 1,
+ eos_token_id: Optional[int] = 2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.num_encoder_layers = num_encoder_layers
+ self.num_encoder_attention_heads = num_encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.num_decoder_layers = num_decoder_layers
+ self.num_decoder_attention_heads = num_decoder_attention_heads
+ self.max_position_embeddings = max_position_embeddings
+ self.init_std = init_std # Normal(0, this parameter)
+ self.activation_function = activation_function
+
+ # parameters for xlmprophetnet
+ self.ngram = ngram
+ self.num_buckets = num_buckets
+ self.relative_max_distance = relative_max_distance
+ self.disable_ngram_loss = disable_ngram_loss
+ self.eps = eps
+
+ # 3 Types of Dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.dropout = dropout
+
+ self.use_cache = use_cache
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ add_cross_attention=add_cross_attention,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
+
+ @property
+ def num_hidden_layers(self) -> int:
+ return self.num_encoder_layers + self.num_decoder_layers
+
+ @num_hidden_layers.setter
+ def num_hidden_layers(self, value):
+ raise NotImplementedError(
+ "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
+ " `num_decoder_layers`."
+ )
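+
+
+ # Illustrative sketch (editorial note, not in the upstream file): `num_hidden_layers`
+ # is a read-only attribute derived from the encoder and decoder depths,
+ #
+ #     config = XLMProphetNetConfig()
+ #     config.num_hidden_layers  # 24 == num_encoder_layers (12) + num_decoder_layers (12)
+ #
+ # and assigning to it raises NotImplementedError, as defined above.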
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..e705b95b1778772cc283e7f7afa3c863f629b7fa
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py
@@ -0,0 +1,2367 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch XLM-ProphetNet model."""
+
+
+import copy
+import math
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, nn
+from torch.nn import LayerNorm
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_xlm_prophetnet import XLMProphetNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+_CONFIG_FOR_DOC = "XLMProphetNetConfig"
+
+XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "microsoft/xprophetnet-large-wiki100-cased",
+ # See all XLMProphetNet models at https://huggingface.co/models?filter=xprophetnet
+]
+
+# Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_START_DOCSTRING with ProphetNetConfig->XLMProphetNetConfig
+XLM_PROPHETNET_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ Original ProphetNet code can be found [here](https://github.com/microsoft/ProphetNet). Checkpoints were converted
+ from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the
+ file `convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py`.
+
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`XLMProphetNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+# Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_INPUTS_DOCSTRING with ProphetNet->XLMProphetNet
+XLM_PROPHETNET_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ XLMProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_STANDALONE_INPUTS_DOCSTRING with ProphetNet->XLMProphetNet
+XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.softmax
+def softmax(hidden_state, dim, onnx_trace=False):
+ if onnx_trace:
+ return nn.functional.softmax(hidden_state.float(), dim=dim)
+ else:
+ return nn.functional.softmax(hidden_state, dim=dim, dtype=torch.float32)
+
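+ # Editorial note (not in the upstream file): both branches of `softmax` above compute
+ # in float32 for numerical stability; the `onnx_trace` branch upcasts explicitly with
+ # `.float()` because, presumably, the `dtype=` keyword is not supported when tracing
+ # for ONNX export.
+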
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ngram_attention_bias
+def ngram_attention_bias(sequence_length, ngram, device, dtype):
+ """
+ This function computes the bias for the predict stream
+ """
+ left_block = (
+ torch.ones((ngram, sequence_length, sequence_length), device=device, dtype=dtype) * torch.finfo(dtype).min
+ )
+ right_block = left_block.detach().clone()
+ # create bias
+ for stream_idx in range(ngram):
+ right_block[stream_idx].fill_diagonal_(0, wrap=False)
+ left_block[stream_idx].triu_(-stream_idx + 1)
+
+ left_block[:, :, 0] = 0
+ return torch.cat([left_block, right_block], dim=2)
+
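+ # Illustrative sketch (editorial note, not in the upstream file). The returned bias
+ # has shape (ngram, sequence_length, 2 * sequence_length): the left half masks the
+ # main stream (only the first position and a causal band stay visible), while the
+ # right half lets each predict-stream position attend only to its own diagonal slot:
+ #
+ #     bias = ngram_attention_bias(4, 2, torch.device("cpu"), torch.float32)
+ #     bias.shape  # torch.Size([2, 4, 8])
+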
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.compute_relative_buckets
+def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False):
+ """
+ This function computes individual parts of the relative position buckets. For more detail, see paper.
+ """
+ inv_relative_positions = -relative_positions
+ rel_positions_bucket = 0
+
+ if is_bidirectional:
+ num_buckets = num_buckets // 2
+ rel_positions_bucket = (
+ rel_positions_bucket
+ + torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets
+ )
+ inv_relative_positions = torch.abs(inv_relative_positions)
+ else:
+ inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions))
+
+ max_exact = num_buckets // 2
+ is_small = torch.lt(inv_relative_positions, max_exact)
+ val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log(
+ max_distance / max_exact
+ ) * (num_buckets - max_exact)
+ val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int()
+ rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large)
+ return rel_positions_bucket
+
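+ # Illustrative sketch (editorial note, not in the upstream file). This mirrors the
+ # T5 bucketing scheme: small distances get their own bucket, larger ones are binned
+ # logarithmically up to max_distance, e.g. for a short unidirectional sequence:
+ #
+ #     position_ids = torch.arange(8).unsqueeze(0)
+ #     relative = position_ids.unsqueeze(1) - position_ids.unsqueeze(-1)
+ #     buckets = compute_relative_buckets(32, 128, relative)  # int buckets in [0, 32)
+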
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.compute_all_stream_relative_buckets
+def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids):
+ """
+ This function computes both main and predict relative position buckets. For more detail, see paper.
+ """
+ # main stream
+ main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1)
+ main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1)
+
+ # predicting stream
+ predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1)
+ predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1)
+ predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1)
+
+ # get both position buckets
+ main_relative_position_buckets = compute_relative_buckets(
+ num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False
+ )
+ predict_relative_position_buckets = compute_relative_buckets(
+ num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False
+ )
+ return main_relative_position_buckets, predict_relative_position_buckets
+
+
+@dataclass
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput with ProphetNet->XLMProphetNet all-casing
+class XLMProphetNetSeq2SeqLMOutput(ModelOutput):
+ """
+ Base class for sequence-to-sequence language model outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the main stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, encoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, encoder_sequence_length)`. Attentions weights of the encoder, after the attention
+ softmax, used to compute the weighted average in the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ logits_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+ @property
+ def decoder_cross_attentions(self):
+ warnings.warn(
+ "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
+ " instead.",
+ FutureWarning,
+ )
+ return self.cross_attentions
+
+
+@dataclass
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput with ProphetNet->XLMProphetNet all-casing
+class XLMProphetNetSeq2SeqModelOutput(ModelOutput):
+ """
+ Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
+ decoding.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
+ Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, encoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, encoder_sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ last_hidden_state_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+ @property
+ def decoder_cross_attentions(self):
+ warnings.warn(
+ "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
+ " instead.",
+ FutureWarning,
+ )
+ return self.cross_attentions
+
+
+@dataclass
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput with ProphetNet->XLMProphetNet all-casing
+class XLMProphetNetDecoderModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
+ Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`):
+ Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, encoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ last_hidden_state_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput with ProphetNet->XLMProphetNet all-casing
+class XLMProphetNetDecoderLMOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the main stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, encoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ logits_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetPreTrainedModel with ProphetNet->XLMProphetNet
+class XLMProphetNetPreTrainedModel(PreTrainedModel):
+ config_class = XLMProphetNetConfig
+ base_model_prefix = "prophetnet"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.init_std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.init_std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.config.decoder_start_token_id
+ pad_token_id = self.config.pad_token_id
+
+ assert decoder_start_token_id is not None, (
+ "self.model.config.decoder_start_token_id has to be defined. In XLMProphetNet it is usually set to the"
+ " pad_token_id. See XLMProphetNet docs for more information"
+ )
+
+ # shift inputs to the right
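+ # worked example (token ids are illustrative): with decoder_start_token_id=0 and pad_token_id=1,
+ # input_ids [[5, -100, 6]] -> shifted [[0, 5, -100]] -> after the masked_fill below [[0, 5, 1]]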
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only non-negative values"
+
+ return shifted_input_ids
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetPositionalEmbeddings with ProphetNet->XLMProphetNet
+class XLMProphetNetPositionalEmbeddings(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting
+ based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to
+ the forward function.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig) -> None:
+ self.max_length = config.max_position_embeddings
+ super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
+
+ def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None):
+ assert (position_ids is None) or (
+ self.padding_idx is None
+ ), "If position_ids is pre-computed then padding_idx should not be set."
+
+ if position_ids is None:
+ if past_key_values is not None:
+ # position_ids is the same for every token when decoding a single step
+ # Without the int() cast, it doesn't work in some cases when exporting to ONNX
+ prev_num_input_ids = past_key_values[0][0].shape[2]
+ num_input_ids = inputs_shape[1] + prev_num_input_ids
+ position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * (
+ int(self.padding_idx + num_input_ids)
+ )
+ else:
+ if attention_mask is None:
+ attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device)
+
+ # retrieve position_ids from input_ids / attention_mask
+ position_ids = (
+ torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
+ ).long() + self.padding_idx
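+ # e.g. attention_mask [[1, 1, 1, 0]] with padding_idx=1:
+ # cumsum -> [[1, 2, 3, 3]], * mask -> [[1, 2, 3, 0]], + padding_idx -> [[2, 3, 4, 1]],
+ # i.e. real tokens count upward from padding_idx + 1 and pad positions map back to padding_idx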
+
+ # make sure position_ids are not bigger than max_length
+ position_ids = position_ids.clamp(0, self.max_length - 1)
+
+ return super().forward(position_ids), position_ids
+
+ def _forward(self, position_ids):
+ return super().forward(position_ids)
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetAttention with ProphetNet->XLMProphetNet
+class XLMProphetNetAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ config: XLMProphetNetConfig,
+ num_attn_heads: int,
+ ):
+ super().__init__()
+ hidden_size = config.hidden_size
+
+ self.attention_dropout = config.attention_dropout
+ self.dropout = config.dropout
+ self.num_attn_heads = num_attn_heads
+ self.head_dim = hidden_size // num_attn_heads
+
+ assert self.head_dim * num_attn_heads == hidden_size, (
+ "`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and"
+ " `config.num_decoder_attention_heads`"
+ )
+
+ self.key_proj = nn.Linear(hidden_size, hidden_size)
+ self.value_proj = nn.Linear(hidden_size, hidden_size)
+ self.query_proj = nn.Linear(hidden_size, hidden_size)
+
+ self.out_proj = nn.Linear(hidden_size, hidden_size)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
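+ # (batch_size, seq_len, hidden_size) -> (batch_size, num_attn_heads, seq_len, head_dim)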
+ return tensor.view(bsz, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states,
+ key_value_states: Optional[Tensor] = None,
+ attention_mask: Optional[Tensor] = None,
+ layer_head_mask: Optional[Tensor] = None,
+ past_key_value: Optional[Tuple[Tensor]] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[Tensor, Optional[Tensor], Optional[Tuple[Tensor]]]:
+ batch_size, tgt_len, hidden_size = hidden_states.size()
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ assert list(hidden_states.size()) == [
+ batch_size,
+ tgt_len,
+ hidden_size,
+ ], f"Size of hidden states should be {batch_size, tgt_len, hidden_size}, but is {hidden_states.size()}"
+
+ # previous time steps are cached - no need to recompute key and value if they are static
+ query_states = self.query_proj(hidden_states) / (self.head_dim**0.5)
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.key_proj(key_value_states), -1, batch_size)
+ value_states = self._shape(self.value_proj(key_value_states), -1, batch_size)
+ else:
+ # self_attention
+ key_states = self._shape(self.key_proj(hidden_states), -1, batch_size)
+ value_states = self._shape(self.value_proj(hidden_states), -1, batch_size)
+
+ if is_cross_attention:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ # project states into the correct shape
+ proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, batch_size).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+ src_len = key_states.size(2)
+ attn_weights = torch.einsum("bsij,bsjk->bsik", query_states, key_states.transpose(2, 3))
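+ # the einsum above is a batched matmul, equivalent to torch.matmul(query_states, key_states.transpose(2, 3))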
+ expected_shape = (batch_size, self.num_attn_heads, tgt_len, src_len)
+ if attn_weights.size() != expected_shape:
+ raise ValueError(f"Attention weights should have size {expected_shape}, but is {attn_weights.size()}")
+
+ # This is part of a workaround to get around fork/join parallelism not supporting Optional types.
+ if attention_mask is not None and attention_mask.dim() == 0:
+ attention_mask = None
+
+ expected_shape = (batch_size, self.num_attn_heads, 1, src_len)
+ if attention_mask is not None and attention_mask.size() != expected_shape:
+ raise ValueError(f"Attention mask should have size {expected_shape}, but is {attention_mask.size()}")
+ if attention_mask is not None: # don't attend to padding symbols
+ attn_weights = attn_weights + attention_mask
+ if output_attentions:
+ attn_weights_reshaped = attn_weights
+ else:
+ attn_weights_reshaped = None
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
+ batch_size, self.num_attn_heads, tgt_len, src_len
+ )
+
+ # apply head_mask also on attn_weights_reshaped which is used for n-gram attention inside the model
+ # (attn_weights_reshaped is only populated when `output_attentions=True`, so guard against `None`)
+ if attn_weights_reshaped is not None:
+ attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped
+
+ attn_probs = nn.functional.dropout(
+ attn_weights,
+ p=self.attention_dropout,
+ training=self.training,
+ )
+ attn_output = torch.einsum("bsij,bsjk->bsik", attn_probs, value_states)
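+ # again a batched matmul: equivalent to torch.matmul(attn_probs, value_states)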
+ expected_shape = (batch_size, self.num_attn_heads, tgt_len, self.head_dim)
+ if attn_output.size() != expected_shape:
+ raise ValueError(f"`attn_output` should have shape {expected_shape}, but is of shape {attn_output.size()}")
+
+ attn_output = attn_output.transpose(1, 2).reshape(batch_size, tgt_len, hidden_size)
+ attn_output = self.out_proj(attn_output)
+
+ attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetFeedForward with ProphetNet->XLMProphetNet
+class XLMProphetNetFeedForward(nn.Module):
+ """
+ This is the residual two feed-forward layer block based on the original Transformer implementation.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig, ffn_dim: int):
+ super().__init__()
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.intermediate = nn.Linear(config.hidden_size, ffn_dim)
+ self.output = nn.Linear(ffn_dim, config.hidden_size)
+ self.activation_dropout = config.activation_dropout
+ self.dropout = config.dropout
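+ # e.g. with hidden_size=1024 and ffn_dim=4096 this block computes 1024 -> 4096 -> activation -> 1024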
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.output(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ return hidden_states
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetNgramSelfAttention with ProphetNet->XLMProphetNet
+class XLMProphetNetNgramSelfAttention(nn.Module):
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.num_buckets = config.num_buckets
+ self.relative_max_distance = config.relative_max_distance
+ self.num_attn_heads = config.num_decoder_attention_heads
+ self.dropout = config.dropout
+ self.attention_dropout = config.attention_dropout
+ self.head_dim = config.hidden_size // self.num_attn_heads
+ self.ngram = config.ngram
+
+ assert (
+ self.head_dim * self.num_attn_heads == config.hidden_size
+ ), "config.hidden_size must be divisible by num_attn_heads"
+ # key, value, query projection
+ self.key_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.value_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.query_proj = nn.Linear(config.hidden_size, config.hidden_size)
+
+ # out projection
+ self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
+
+ # rel position embeddings
+ self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads)
+
+ # for onnx runtime
+ self.onnx_trace = False
+
+ def _shape(self, tensor, seq_len, batch_size):
+ return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def prepare_for_onnx_export_(self):
+ self.onnx_trace = True
+
+ def forward(
+ self,
+ hidden_states,
+ past_key_value: Optional[Tuple[Tensor]] = None,
+ attention_mask=None,
+ layer_head_mask=None,
+ extended_predict_attention_mask=None,
+ main_relative_position_buckets=None,
+ predict_relative_position_buckets=None,
+ position_ids=None,
+ ):
+ batch_size, ngram_sequence_length, hidden_size = hidden_states.size()
+ assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], (
+ f"`hidden_states` should be of shape {batch_size, ngram_sequence_length, hidden_size}, but is of shape"
+ f" {hidden_states.shape}"
+ )
+
+ # project
+ query_states = self.query_proj(hidden_states)
+ key_states = self.key_proj(hidden_states)
+ value_states = self.value_proj(hidden_states)
+
+ # normalize
+ query_states = query_states / (self.head_dim**0.5)
+
+ # reshape
+ query_states = self._shape(query_states, ngram_sequence_length, batch_size)
+ key_states = self._shape(key_states, -1, batch_size)
+ value_states = self._shape(value_states, -1, batch_size)
+ proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
+
+ query_states = query_states.view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ # chunk into main stream and predict stream
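+ # hidden_states lays out the main stream followed by the `ngram` predict streams along the
+ # sequence axis, e.g. with ngram=2 and main length S: [main(S) | predict_1(S) | predict_2(S)]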
+ hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1)
+ query_states_list = query_states.chunk(1 + self.ngram, dim=2)
+ key_states_list = key_states.chunk(1 + self.ngram, dim=2)
+ value_states_list = value_states.chunk(1 + self.ngram, dim=2)
+
+ main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:]
+ main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:]
+ main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:]
+ main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:]
+
+ # saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim)
+ if past_key_value is not None:
+ prev_main_key_states = past_key_value[0]
+ main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=2)
+ prev_main_value_states = past_key_value[1]
+ main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=2)
+
+ # Update cache
+ past_key_value = (main_key_states, main_value_states)
+
+ # get seq_length of main stream only
+ sequence_length = ngram_sequence_length // (1 + self.ngram)
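+ # e.g. ngram_sequence_length=15 with ngram=2 -> sequence_length=5 positions per stream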
+
+ # MAIN-STREAM
+ # main attn weights
+ # [batch_size, number_heads, sequence_length, head_dimension]
+ # x [batch_size, number_heads, head_dimension, sequence_length]
+ # -> [batch_size, number_heads, sequence_length, sequence_length]
+ main_attn_weights = torch.einsum("bntc,bncs->bnts", main_query_states, main_key_states.transpose(2, 3))
+
+ # retrieve relative position embeddings for each layer -> see paper for more details
+ main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(
+ main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets
+ )
+
+ main_attn_weights = main_attn_weights + main_relative_pos_embeddings
+
+ if attention_mask is not None:
+ main_attn_weights = main_attn_weights + attention_mask
+
+ main_attn_probs = softmax(
+ main_attn_weights,
+ dim=-1,
+ onnx_trace=self.onnx_trace,
+ ).type_as(main_attn_weights)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view(
+ batch_size, self.num_attn_heads, -1, sequence_length
+ )
+
+ main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training)
+ # project to attn_output
+ # [batch_size, number_heads, sequence_length, sequence_length]
+ # x [batch_size, number_heads, sequence_length, head_dimension]
+ # -> [batch_size, number_heads, sequence_length, head_dimension]
+ main_attn_output = torch.einsum("bntc,bncs->bnts", main_attn_probs, main_value_states)
+ # reshape so that num_heads dim is merged into last `head_dim` axis
+ main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size)
+ main_attn_output = self.out_proj(main_attn_output)
+
+ # PREDICT-STREAM
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ predict_query_states = torch.stack(predict_query_states_list, 1).view(
+ batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim
+ )
+
+ # [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for key in predict_key_states_list], 1)
+
+ # [batch_size, sequence_length, ngram, hidden_size]
+ predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2)
+
+ # [batch_size, number_heads, ngram, 2*sequence_length, head_dimension]
+ predict_value_states = torch.cat(
+ [torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2
+ )
+
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ # -> [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ predict_attn_weights = torch.einsum("bnhtc,bnhsc->bnhts", (predict_query_states, predict_key_states))
+
+ # retrieve relative position embeddings for each layer -> see paper for more details
+ # [batch_size, ngram, number_heads, sequence_length, predict_relative_pos_embeddings]
+ predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(
+ predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets
+ )
+
+ # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings
+
+ if extended_predict_attention_mask is not None:
+ # Permuting Predict attention mask to [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4)
+ extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype)
+ predict_attn_weights = predict_attn_weights + extended_predict_attention_mask
+
+ predict_attn_probs = softmax(
+ predict_attn_weights,
+ dim=-1,
+ onnx_trace=self.onnx_trace,
+ ).type_as(predict_attn_weights)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs
+
+ predict_attn_probs = nn.functional.dropout(
+ predict_attn_probs, p=self.attention_dropout, training=self.training
+ )
+ # project to attention output
+ # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ # -> [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ predict_attn_output = torch.einsum(
+ "bnhts,bnhsc->bnhtc", (predict_attn_probs, predict_value_states.transpose(1, 2))
+ )
+
+ # reshape so that num_heads dim is merged into last `head_dim` axis
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension] -> [batch_size, ngram, sequence_length, hidden_size]
+ predict_attn_output = predict_attn_output.transpose(2, 3)
+ predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size)
+ predict_attn_output = self.out_proj(predict_attn_output)
+
+ # concat to single attn output
+ # [batch_size, (1+ngram)*sequence_length, hidden_size]
+ attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size)
+ # reshape into better form for `config.output_attentions`
+ main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1)
+
+ attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
+
+ return attn_output, main_attn_probs, predict_attn_probs, past_key_value
+
+ def get_main_relative_pos_embeddings(
+ self, hidden_states, attn_weights, position_ids, main_relative_position_buckets
+ ):
+ # input hidden_states [batch_size, sequence_length, hidden_size]
+ # input attn_weights [batch_size, num_heads, sequence_length, sequence_length]
+ # input position_ids [batch_size, sequence_length] or [1,1]
+ batch_size, num_attn_heads, tgt_len, src_len = attn_weights.shape
+ attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len)
+ if main_relative_position_buckets is None:
+ batch_size, sequence_length = hidden_states.shape[:2]
+ relative_positions = (
+ torch.arange(1, attn_weights.shape[-1] + 1)
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(batch_size, sequence_length, 1)
+ .to(position_ids.device)
+ )
+ # [batch_size, sequence_length, sequence_length+1]
+ relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
+ main_relative_position_buckets = compute_relative_buckets(
+ self.num_buckets, self.relative_max_distance, relative_positions, False
+ )
+
+ # [batch_size, sequence_length, num_buckets * num_heads]
+ rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
+ rel_pos_embeddings = rel_pos_embeddings.view(
+ rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)
+ )
+ rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2)
+ # [batch_size, num_heads, sequence_length, num_buckets]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,))
+
+ main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1)
+ # [batch_size * num_heads * sequence_length, sequence_length]
+ main_relative_position_buckets = main_relative_position_buckets.view(
+ -1, main_relative_position_buckets.shape[-1]
+ )
+ main_relative_position_buckets = main_relative_position_buckets.long()
+ # [batch_size * num_heads * sequence_length, sequence_length]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))
+
+ main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets)
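+ # gather selects, for every (head, query, key) slot, the embedding belonging to that pair's
+ # relative-position bucket, e.g. bucket id 3 picks column 3 of rel_pos_embeddings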
+ main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1)
+ return main_relative_pos_embeddings
+
+ def get_predict_relative_pos_embeddings(
+ self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets
+ ):
+ # input hidden_states [batch_size, sequence_length, ngram, hidden_size]
+ # input attn_weights [batch_size, ngram, num_heads, sequence_length, 2*sequence_length]
+ # input position_ids [batch_size, sequence_length] or [1,1]
+ # input predict_relative_position_buckets [batch_size, sequence_length, 2*sequence_length] or None
+ batch_size, sequence_length = hidden_states.shape[0:2]
+
+ if predict_relative_position_buckets is None:
+ key_sequence_length = attn_weights.shape[-1]
+ assert (
+ position_ids[0][0] == key_sequence_length - 1
+ ), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)"
+ relative_positions = (
+ torch.arange(0, key_sequence_length)
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(batch_size, sequence_length, 1)
+ .to(position_ids.device)
+ )
+
+ relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
+ predict_relative_position_buckets = compute_relative_buckets(
+ self.num_buckets, self.relative_max_distance, relative_positions, False
+ )
+
+ # [batch_size, ngram, sequence_length, hidden_size]
+ hidden_states = hidden_states.transpose(1, 2)
+ rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
+
+ # [batch_size, ngram, sequence_length, num_buckets, num_heads]
+ rel_pos_embeddings = rel_pos_embeddings.view(
+ hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)
+ )
+ rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3)
+ # [batch_size * ngram * sequence_length * num_heads, num_buckets]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets)
+ # [ngram, batch_size, num_heads * sequence_length, -1]
+ predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0)
+ predict_relative_position_buckets = predict_relative_position_buckets.repeat(
+ self.ngram, 1, self.num_attn_heads, 1
+ )
+ # [ngram * batch_size * num_heads * sequence_length, -1]
+ predict_relative_position_buckets = predict_relative_position_buckets.view(
+ -1, predict_relative_position_buckets.size(-1)
+ ).long()
+
+ predict_relative_pos_embeddings = torch.gather(
+ rel_pos_embeddings, dim=1, index=predict_relative_position_buckets
+ )
+
+ # [batch_size, ngram, num_heads, sequence_length, -1]
+ predict_relative_pos_embeddings = predict_relative_pos_embeddings.view(
+ batch_size, self.ngram, self.num_attn_heads, sequence_length, -1
+ )
+
+ return predict_relative_pos_embeddings
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetEncoderLayer with ProphetNet->XLMProphetNet, Prophetnet->XLMProphetnet
+class XLMProphetNetEncoderLayer(nn.Module):
+ """
+ Encoder block for XLMProphetnet
+ """
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__()
+ # 1st residual block
+ self.self_attn = XLMProphetNetAttention(config, config.num_encoder_attention_heads)
+ self.self_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 2nd residual block
+ self.feed_forward = XLMProphetNetFeedForward(config, config.encoder_ffn_dim)
+ self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ output_attentions: bool = False,
+ ):
+ # 1st residual block
+ attention_output, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.self_attn_layer_norm(attention_output + hidden_states)
+
+ # 2nd residual block
+ feed_forward_output = self.feed_forward(hidden_states)
+ hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLayer with Prophetnet->XLMProphetnet, ProphetNet->XLMProphetNet
+class XLMProphetNetDecoderLayer(nn.Module):
+ """
+ Decoder block for XLMProphetnet
+ """
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__()
+ # 1st residual block
+ self.self_attn = XLMProphetNetNgramSelfAttention(config)
+ self.self_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 2nd residual block
+ if config.add_cross_attention:
+ self.cross_attn = XLMProphetNetAttention(config, config.num_decoder_attention_heads)
+ self.cross_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 3rd residual block
+ self.feed_forward = XLMProphetNetFeedForward(config, config.decoder_ffn_dim)
+ self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attn_mask=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ extended_predict_attention_mask=None,
+ main_relative_position_buckets=None,
+ predict_relative_position_buckets=None,
+ position_ids=None,
+ past_key_value=None,
+ use_cache: bool = True,
+ output_attentions: bool = False,
+ ):
+ # 1st residual block
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ ngram_attention_output, self_attn_weights, self_attn_weights_ngram, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ extended_predict_attention_mask=extended_predict_attention_mask,
+ main_relative_position_buckets=main_relative_position_buckets,
+ predict_relative_position_buckets=predict_relative_position_buckets,
+ position_ids=position_ids,
+ )
+ hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ # 2nd residual block
+ attention_output, cross_attn_weights, cross_attn_present_key_value = self.cross_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attn_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # 3rd residual block
+ feed_forward_output = self.feed_forward(hidden_states)
+ hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+@add_start_docstrings(
+ "The standalone encoder part of the XLMProphetNetModel.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetEncoder with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET
+class XLMProphetNetEncoder(XLMProphetNetPreTrainedModel):
+ r"""
+ word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
+ The word embedding parameters. This can be used to initialize [`XLMProphetNetEncoder`] with pre-defined word
+ embeddings instead of randomly initialized word embeddings.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig, word_embeddings: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.word_embeddings = (
+ word_embeddings
+ if word_embeddings is not None
+ else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ )
+ self.position_embeddings = XLMProphetNetPositionalEmbeddings(config)
+ self.embeddings_layer_norm = LayerNorm(config.hidden_size)
+
+ self.layers = nn.ModuleList([XLMProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetEncoder
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetEncoder.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Either input_ids or inputs_embeds has to be passed.")
+ elif input_ids is not None and inputs_embeds is not None:
+ raise ValueError("Make sure to only pass input_ids or inputs_embeds.")
+ elif input_ids is not None and inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ # prepare attention mask
+ if attention_mask is not None:
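+ # turn the 0/1 padding mask into an additive bias of shape (batch_size, num_heads, 1, seq_len):
+ # 0.0 for visible tokens, torch.finfo(dtype).min for masked ones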
+ extended_attention_mask = (
+ 1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1)
+ ) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype)
+ else:
+ extended_attention_mask = None
+
+ position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device)
+
+ hidden_states = inputs_embeds + position_embeddings
+ hidden_states = self.embeddings_layer_norm(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training)
+
+ encoder_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ assert head_mask.size()[0] == (
+ len(self.layers)
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_hidden_states = encoder_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ extended_attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_hidden_states = encoder_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions
+ )
+
+
+@add_start_docstrings(
+ "The standalone decoder part of the XLMProphetNetModel.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoder with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET,
+class XLMProphetNetDecoder(XLMProphetNetPreTrainedModel):
+ r"""
+ word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
+ The word embedding parameters. This can be used to initialize [`XLMProphetNetDecoder`] with pre-defined word
+ embeddings instead of randomly initialized word embeddings.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig, word_embeddings: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.ngram = config.ngram
+ self.num_buckets = config.num_buckets
+ self.relative_max_distance = config.relative_max_distance
+ self.dropout = config.dropout
+ self.max_target_positions = config.max_position_embeddings
+
+ self.word_embeddings = (
+ word_embeddings
+ if word_embeddings is not None
+ else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ )
+ self.position_embeddings = XLMProphetNetPositionalEmbeddings(config)
+
+ self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None)
+ self.layers = nn.ModuleList([XLMProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)])
+ self.embeddings_layer_norm = LayerNorm(config.hidden_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=XLMProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, XLMProphetNetDecoderModelOutput]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetDecoder
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetDecoder.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone", add_cross_attention=False)
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.")
+ elif input_ids is not None and inputs_embeds is not None:
+ raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.")
+ elif input_ids is not None and inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ main_stream_pos_embed, position_ids = self.position_embeddings(
+ (batch_size, sequence_length),
+ device=inputs_embeds.device,
+ past_key_values=past_key_values,
+ )
+
+ if past_key_values is not None:
+ main_relative_position_buckets, predict_relative_position_buckets = None, None
+ else:
+ (
+ main_relative_position_buckets,
+ predict_relative_position_buckets,
+ ) = self.compute_buffered_relative_buckets(position_ids)
+ predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1)
+
+ # add position embeddings
+ hidden_states = inputs_embeds + main_stream_pos_embed
+
+ ngram_embeddings = self.ngram_embeddings.weight
+
+ # prepare attention mask
+ if past_key_values is not None:
+ assert (
+ hidden_states.size(1) == 1
+ ), "At the moment `use_cache` is only supported for `decoder_input_ids` of length 1"
+
+ ngram_hidden_states = [
+ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1)
+ for ngram in range(self.ngram)
+ ]
+ extended_attention_mask = None
+ extended_predict_attention_mask = None
+ else:
+ ngram_hidden_states = [
+ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed) for ngram in range(self.ngram)
+ ]
+ extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask)
+ extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask)
+
+ # prepare encoder attention mask
+ if encoder_attention_mask is not None:
+ extended_encoder_attention_mask = (
+ 1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1)
+ ) * torch.finfo(self.dtype).min
+ extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype)
+ else:
+ extended_encoder_attention_mask = None
+
+ hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1)
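+ # main and predict streams now share one tensor of shape (batch_size, (1 + ngram) * sequence_length, hidden_size)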
+
+ if self.embeddings_layer_norm:
+ hidden_states = self.embeddings_layer_norm(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # init attentions, hidden_states and cache with empty tuples
+ all_main_stream_hidden_states = () if output_hidden_states else None
+ all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None
+
+ all_main_stream_attns = () if output_attentions else None
+ all_ngram_stream_attns = () if output_attentions else None
+ all_cross_attns = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ present_key_values = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ assert attn_mask.size()[0] == (len(self.layers)), (
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ # grad cannot be kept because tensor is sliced
+ all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
+ if self.config.ngram > 0:
+ all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ extended_attention_mask,
+ encoder_hidden_states,
+ extended_encoder_attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ (cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
+ extended_predict_attention_mask,
+ main_relative_position_buckets,
+ predict_relative_position_buckets,
+ position_ids,
+ None,
+ use_cache,
+ output_attentions,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attn_mask=extended_encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ extended_predict_attention_mask=extended_predict_attention_mask,
+ main_relative_position_buckets=main_relative_position_buckets,
+ predict_relative_position_buckets=predict_relative_position_buckets,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ present_key_values += (layer_outputs[4 if output_attentions else 1],)
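+ # layer_outputs is (hidden_states, [self_attn, ngram_attn, cross_attn,] present_key_value),
+ # so the cache sits at index 4 when attentions are returned and at index 1 otherwise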
+
+ if output_attentions:
+ all_main_stream_attns += (layer_outputs[1],)
+ all_ngram_stream_attns += (layer_outputs[2],)
+
+ if self.config.add_cross_attention:
+ all_cross_attns += (layer_outputs[3],)
+
+ if output_hidden_states:
+ all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
+ if self.config.ngram > 0:
+ all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
+
+ # split last_hidden_state for return
+ last_hidden_state = hidden_states[:, :sequence_length]
+ last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ last_hidden_state,
+ last_hidden_state_ngram,
+ present_key_values,
+ all_main_stream_hidden_states,
+ all_ngram_stream_hidden_states,
+ all_main_stream_attns,
+ all_ngram_stream_attns,
+ all_cross_attns,
+ ]
+ if v is not None
+ )
+ return XLMProphetNetDecoderModelOutput(
+ last_hidden_state=last_hidden_state,
+ last_hidden_state_ngram=last_hidden_state_ngram,
+ past_key_values=present_key_values,
+ hidden_states=all_main_stream_hidden_states,
+ hidden_states_ngram=all_ngram_stream_hidden_states,
+ attentions=all_main_stream_attns,
+ ngram_attentions=all_ngram_stream_attns,
+ cross_attentions=all_cross_attns,
+ )
+
+ def compute_buffered_relative_buckets(self, position_ids):
+ batch_size, sequence_length = position_ids.shape
+
+ position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1)
+ main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(
+ self.num_buckets, self.relative_max_distance, position_ids
+ )
+
+ # buffer relative buckets
+ main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1)
+ predict_relative_buckets = torch.cat(
+ [
+ predict_relative_buckets[:, :sequence_length, :sequence_length],
+ predict_relative_buckets[
+ :, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length
+ ],
+ ],
+ 2,
+ ).repeat(batch_size, 1, 1)
+
+ return main_relative_buckets, predict_relative_buckets
+
+ def prepare_attention_mask(self, hidden_states, attention_mask):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # get causal mask
+ causal_mask = torch.full(
+ (seq_length, seq_length),
+ torch.finfo(hidden_states.dtype).min,
+ dtype=hidden_states.dtype,
+ device=hidden_states.device,
+ )
+ causal_mask = torch.triu(causal_mask, 1)
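+ # worked example with seq_length=3 (m = torch.finfo(dtype).min):
+ # [[0, m, m],
+ #  [0, 0, m],
+ #  [0, 0, 0]] -> each position may attend to itself and to earlier positions only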
+
+ extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand(
+ (batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape
+ )
+
+ # add usual attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_causal_mask + extended_attention_mask
+ else:
+ extended_attention_mask = extended_causal_mask
+ return extended_attention_mask.to(hidden_states.dtype)
+
+ def prepare_predict_attention_mask(self, hidden_states, attention_mask):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # get causal mask
+ predict_causal_mask = ngram_attention_bias(
+ self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype
+ )
+ predict_causal_mask = torch.cat(
+ [
+ predict_causal_mask[:, :seq_length, :seq_length],
+ predict_causal_mask[
+ :, :seq_length, self.max_target_positions : self.max_target_positions + seq_length
+ ],
+ ],
+ dim=-1,
+ )
+ extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand(
+ (batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape
+ )
+
+ # add usual attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_attention_mask.expand(
+ (batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length)
+ )
+ # predicted stream attention_mask should always be 0
+ extended_attention_mask = torch.cat(
+ [extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1
+ )
+ extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask
+ else:
+ extended_predict_attention_mask = extended_predict_causal_mask
+ return extended_predict_attention_mask.to(hidden_states.dtype)
+
+
+@add_start_docstrings(
+ "The bare XLMProphetNet Model outputting raw hidden-states without any specific head on top.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetModel with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET
+class XLMProphetNetModel(XLMProphetNetPreTrainedModel):
+ _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight"]
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__(config)
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_encoder_decoder = False
+ encoder_config.use_cache = False
+ self.encoder = XLMProphetNetEncoder(encoder_config, self.word_embeddings)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ self.decoder = XLMProphetNetDecoder(decoder_config, self.word_embeddings)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+ self.encoder.word_embeddings = self.word_embeddings
+ self.decoder.word_embeddings = self.word_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings)
+ self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=XLMProphetNetSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, XLMProphetNetSeq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetModel.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states
+ >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+        # decoder outputs consist of (dec_features, past_key_values, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+ return XLMProphetNetSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram,
+ decoder_attentions=decoder_outputs.attentions,
+ decoder_ngram_attentions=decoder_outputs.ngram_attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The XLMProphetNet Model with a language modeling head. Can be used for sequence generation tasks.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetForConditionalGeneration with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET
+class XLMProphetNetForConditionalGeneration(XLMProphetNetPreTrainedModel):
+ _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight", "lm_head.weight"]
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__(config)
+ self.prophetnet = XLMProphetNetModel(config)
+ self.padding_idx = config.pad_token_id
+ self.disable_ngram_loss = config.disable_ngram_loss
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head)
+
+ def get_input_embeddings(self):
+ return self.prophetnet.word_embeddings
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=XLMProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, XLMProphetNetSeq2SeqLMOutput]:
+ r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
+            labels in `[0, ..., config.vocab_size - 1]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetForConditionalGeneration.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> logits_next_token = outputs.logits # logits to predict next token as usual
+ >>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
+ # get decoder inputs from shifting lm labels to the right
+ decoder_input_ids = self._shift_right(labels)
+
+ outputs = self.prophetnet(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ batch_size, sequence_length = (
+ decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2]
+ )
+
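+        # outputs[1] holds the n-gram stream states (`last_hidden_state_ngram`), laid out as
+        # (batch_size, ngram * sequence_length, hidden_size); reshape so that the lm_head can
+        # score each of the `ngram` predict streams separately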
+ predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
+ predict_logits = self.lm_head(predicting_streams)
+
+ logits = predict_logits[:, 0]
+ logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
+
+ # To use .view in loss computation, make sure that logits is contiguous.
+ if not logits.is_contiguous():
+ logits = logits.contiguous()
+
+ loss = None
+ if labels is not None:
+ loss = self._compute_loss(predict_logits, labels)
+
+ if not return_dict:
+ all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
+ return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
+ else:
+ return XLMProphetNetSeq2SeqLMOutput(
+ loss=loss,
+ logits=logits,
+ logits_ngram=logits_ngram,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ decoder_ngram_attentions=outputs.decoder_ngram_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def _compute_loss(self, logits, labels, ignore_index=-100):
+ expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
+
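+        # every predict stream is trained against the same gold labels; when
+        # `disable_ngram_loss` is set, only the first (next-token) stream keeps real targets
+        # and the remaining streams stay at `ignore_index`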
+ for i in range(self.config.ngram):
+ if i > 0 and self.disable_ngram_loss:
+ break
+ expend_targets[i, :, :] = labels
+
+ logits = logits.transpose(0, 1).contiguous()
+ lprobs = nn.functional.log_softmax(
+ logits.view(-1, logits.size(-1)),
+ dim=-1,
+ dtype=torch.float32,
+ )
+
+ loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
+
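+        # optional fairseq-style label smoothing: blend the NLL loss with the mean negative
+        # log-probability over the full vocabulary, weighted by `config.eps`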
+ if self.config.eps > 0.0:
+ smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
+ non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
+ smooth_loss = smooth_loss[non_masked_tokens]
+ smooth_loss = smooth_loss.mean()
+
+ eps_i = self.config.eps / lprobs.size(-1)
+ loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
+
+ return loss
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ assert encoder_outputs is not None, "`encoder_outputs` have to be passed for generation."
+
+        if past_key_values:
+            # the cache is empty on the first generation step; afterwards only the last decoder
+            # token has to be passed, since earlier positions are already cached
+            decoder_input_ids = decoder_input_ids[:, -1:]
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache,
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return self._shift_right(labels)
+
+ @staticmethod
+ # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+ + layer_past[2:],
+ )
+ return reordered_past
+
+ def get_encoder(self):
+ return self.prophetnet.encoder
+
+ def get_decoder(self):
+ return self.prophetnet.decoder
+
+
+@add_start_docstrings(
+ "The standalone decoder part of the XLMProphetNetModel with a lm head on top. The model can be used for causal"
+ " language modeling.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetForCausalLM with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET
+class XLMProphetNetForCausalLM(XLMProphetNetPreTrainedModel):
+ _tied_weights_keys = [
+ "prophetnet.word_embeddings.weight",
+ "prophetnet.decoder.word_embeddings.weight",
+ "lm_head.weight",
+ ]
+
+ def __init__(self, config: XLMProphetNetConfig):
+ # set config for CLM
+ config = copy.deepcopy(config)
+ config.is_decoder = True
+ config.is_encoder_decoder = False
+ super().__init__(config)
+ self.prophetnet = XLMProphetNetDecoderWrapper(config)
+
+ self.padding_idx = config.pad_token_id
+ self.disable_ngram_loss = config.disable_ngram_loss
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.prophetnet.decoder.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.prophetnet.decoder.word_embeddings = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head)
+
+ def set_decoder(self, decoder):
+ self.prophetnet.decoder = decoder
+
+ def get_decoder(self):
+ return self.prophetnet.decoder
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=XLMProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, XLMProphetNetDecoderLMOutput]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            `[-100, 0, ..., config.vocab_size - 1]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+            ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size - 1]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetForCausalLM
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetForCausalLM.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+
+ >>> # Model can also be used with EncoderDecoder framework
+ >>> from transformers import BertTokenizer, EncoderDecoderModel, AutoTokenizer
+ >>> import torch
+
+ >>> tokenizer_enc = BertTokenizer.from_pretrained("google-bert/bert-large-uncased")
+ >>> tokenizer_dec = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
+ ... "google-bert/bert-large-uncased", "patrickvonplaten/xprophetnet-large-uncased-standalone"
+ ... )
+
+ >>> ARTICLE = (
+ ... "the us state department said wednesday it had received no "
+ ... "formal word from bolivia that it was expelling the us ambassador there "
+ ... "but said the charges made against him are `` baseless ."
+ ... )
+ >>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids
+ >>> labels = tokenizer_dec(
+ ... "us rejects charges against its ambassador in bolivia", return_tensors="pt"
+ ... ).input_ids
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:])
+
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # decoder outputs consist of (dec_features, past_key_values, dec_hidden, dec_attn)
+ outputs = self.prophetnet.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2]
+
+ predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
+ predict_logits = self.lm_head(predicting_streams)
+
+ logits = predict_logits[:, 0]
+ logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
+
+ loss = None
+ if labels is not None:
+ loss = self._compute_loss(predict_logits, labels)
+
+ if not return_dict:
+ all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
+ return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
+ else:
+ return XLMProphetNetDecoderLMOutput(
+ loss=loss,
+ logits=logits,
+ logits_ngram=logits_ngram,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ hidden_states_ngram=outputs.hidden_states_ngram,
+ attentions=outputs.attentions,
+ ngram_attentions=outputs.ngram_attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def _compute_loss(self, logits, labels, ignore_index=-100):
+ expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
+
+ for i in range(self.config.ngram):
+ if i > 0 and self.disable_ngram_loss:
+ break
+ expend_targets[i, :, :] = labels
+
+ logits = logits.transpose(0, 1).contiguous()
+ lprobs = nn.functional.log_softmax(
+ logits.view(-1, logits.size(-1)),
+ dim=-1,
+ dtype=torch.float32,
+ )
+
+ loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
+
+ if self.config.eps > 0.0:
+ smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
+ non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
+ smooth_loss = smooth_loss[non_masked_tokens]
+ smooth_loss = smooth_loss.mean()
+
+ eps_i = self.config.eps / lprobs.size(-1)
+ loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
+
+ return loss
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ use_cache=None,
+ **kwargs,
+ ):
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_ids.shape)
+
+        if past_key_values:
+            # the cache is empty on the first generation step; afterwards only the last token
+            # has to be passed, since earlier positions are already cached
+            input_ids = input_ids[:, -1:]
+        return {
+            "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ # Copied from transformers.models.bart.modeling_bart.BartForCausalLM._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderWrapper with ProphetNet->XLMProphetNet, prophetnet->XLMProphetNet
+class XLMProphetNetDecoderWrapper(XLMProphetNetPreTrainedModel):
+ """
+ This is a wrapper class, so that [`XLMProphetNetForCausalLM`] can correctly be loaded from pretrained XLMProphetNet
+ classes.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__(config)
+
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.decoder = XLMProphetNetDecoder(config, word_embeddings=self.word_embeddings)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _tie_weights(self):
+ self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings())
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..c024d5d16dc04a1bae8764f3aa64e64989bf761d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py
@@ -0,0 +1,341 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SPIECE_UNDERLINE = "▁"
+
+VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "microsoft/xprophetnet-large-wiki100-cased": (
+ "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
+ ),
+ }
+}
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "microsoft/xprophetnet-large-wiki100-cased": 512,
+}
+
+
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+class XLMProphetNetTokenizer(PreTrainedTokenizer):
+ """
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"[SEP]"`):
+            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
+            token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the beginning of
+            sequence. The token used is the `cls_token`.
+
+            </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The end of sequence token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+            The token used is the `sep_token`.
+
+            </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+            - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
+              using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
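+    Example (a minimal usage sketch, assuming the `microsoft/xprophetnet-large-wiki100-cased` checkpoint referenced
+    in `PRETRAINED_VOCAB_FILES_MAP` above is available):
+
+    ```python
+    >>> from transformers import XLMProphetNetTokenizer
+
+    >>> tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
+    >>> inputs = tokenizer("Hello world", return_tensors="pt")
+    ```
+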
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="[SEP]",
+ eos_token="[SEP]",
+ sep_token="[SEP]",
+ unk_token="[UNK]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ try:
+ import sentencepiece as spm
+ except ImportError:
+ logger.warning(
+ "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
+ " pip install sentencepiece"
+ )
+ raise
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(str(vocab_file))
+ self.vocab_file = vocab_file
+
+ # Original fairseq vocab and spm vocab must be "aligned":
+        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
+        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
+        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
+        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
+
+ # put special tokens and [unused] tokens into the vocab
+ self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
+
+ for i in range(10):
+ tok = f"[unused{i}]"
+ self.fairseq_tokens_to_ids[tok] = 5 + i
+
+ # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
+ self.fairseq_offset = 12
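+        # e.g. spm id 3 (",") maps to model id 3 + 12 = 15, directly after the 5 special
+        # tokens and the 10 [unused] tokens registered above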
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
+
+ # TODO ArthurZ fairseq_ids_to_tokens should be removed
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ try:
+ import sentencepiece as spm
+ except ImportError:
+ logger.warning(
+ "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
+ " pip install sentencepiece"
+ )
+ raise
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
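+
+        Example (illustrative only; the token IDs are arbitrary and the trailing `1` marks the appended `[SEP]`):
+
+        ```python
+        >>> tokenizer.get_special_tokens_mask([10, 11, 12])
+        [0, 0, 0, 1]
+        ```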
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return ([0] * len(token_ids_0)) + [1]
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLMProphetNet
+ does not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
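+
+        Example (illustrative only; the token IDs are arbitrary):
+
+        ```python
+        >>> tokenizer.create_token_type_ids_from_sequences([10, 11])
+        [0, 0, 0]
+        ```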
+
+ """
+
+ sep = [self.sep_token_id]
+
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0]
+ return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ @property
+ def vocab_size(self):
+ return len(self.sp_model) + self.fairseq_offset
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+    def _tokenize(self, text: str) -> List[str]:
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if token in self.fairseq_tokens_to_ids:
+ return self.fairseq_tokens_to_ids[token]
+ spm_id = self.sp_model.PieceToId(token)
+
+ # Need to return unknown token if the SP model returned 0
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ if index in self.fairseq_ids_to_tokens:
+ return self.fairseq_ids_to_tokens[index]
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+        and adding special tokens. An XLMProphetNet sequence has the following format:
+
+ - single sequence: `X [SEP]`
+ - pair of sequences: `A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
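+
+        Example (illustrative only; the token IDs are arbitrary, while `[SEP]` resolves to id 2 via the fairseq
+        alignment above):
+
+        ```python
+        >>> tokenizer.build_inputs_with_special_tokens([10, 11])
+        [10, 11, 2]
+        ```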
+ """
+
+ if token_ids_1 is None:
+ return token_ids_0 + [self.sep_token_id]
+ sep = [self.sep_token_id]
+ return token_ids_0 + sep + token_ids_1 + sep