diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce399f92e0fa4d4dc43554453767d21521e63c1f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__init__.py
@@ -0,0 +1,112 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_torch_available,
+ is_vision_available,
+)
+
+
+_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
+ _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_beit"] = [
+ "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "BeitForImageClassification",
+ "BeitForMaskedImageModeling",
+ "BeitForSemanticSegmentation",
+ "BeitModel",
+ "BeitPreTrainedModel",
+ "BeitBackbone",
+ ]
+
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_beit"] = [
+ "FlaxBeitForImageClassification",
+ "FlaxBeitForMaskedImageModeling",
+ "FlaxBeitModel",
+ "FlaxBeitPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_beit import BeitFeatureExtractor
+ from .image_processing_beit import BeitImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_beit import (
+ BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ BeitBackbone,
+ BeitForImageClassification,
+ BeitForMaskedImageModeling,
+ BeitForSemanticSegmentation,
+ BeitModel,
+ BeitPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_beit import (
+ FlaxBeitForImageClassification,
+ FlaxBeitForMaskedImageModeling,
+ FlaxBeitModel,
+ FlaxBeitPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
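As an illustration of the lazy-import pattern above: none of the gated submodules are imported until one of their names is first accessed, and each optional-dependency block silently skips its exports when the corresponding extra is missing. A minimal usage sketch, assuming torch and Pillow are installed:

    from transformers import BeitConfig, BeitImageProcessor, BeitModel

    config = BeitConfig()             # always available, no optional dependency
    processor = BeitImageProcessor()  # requires the vision extra (Pillow)
    model = BeitModel(config)         # requires torch; weights are randomly initialized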
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f1446d77c72be75d026e0759dcff5b9ff4e8234
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0eefb2d4fc42807d324470b5a34a53ee7d62cc0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/configuration_beit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/convert_beit_unilm_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/convert_beit_unilm_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6287e136ad46664aae596952fc74c8d200c6586
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/convert_beit_unilm_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a16e70e70bec65b6009fc4abf9965b13987ee49
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/feature_extraction_beit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1aa4d1b12be75d5e84330c71f958cfe456796cf1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/image_processing_beit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf4a36fcbc861620b645719e0fea75f8466e7701
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_beit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26347e8d24b2db511910b0c0319e54bb5c3918ed
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/__pycache__/modeling_flax_beit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbb1e755e94b36a651ed60f85044ddb84b2d3bef
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py
@@ -0,0 +1,231 @@
+# coding=utf-8
+# Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BEiT model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class BeitConfig(BackboneConfigMixin, PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate a BEiT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the BEiT
+ [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 8192):
+ Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during
+ pre-training.
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ use_mask_token (`bool`, *optional*, defaults to `False`):
+ Whether to use a mask token for masked image modeling.
+ use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether to use BERT-style absolute position embeddings.
+ use_relative_position_bias (`bool`, *optional*, defaults to `False`):
+ Whether to use T5-style relative position embeddings in the self-attention layers.
+ use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
+ Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
+ layer_scale_init_value (`float`, *optional*, defaults to 0.1):
+ Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale.
+ drop_path_rate (`float`, *optional*, defaults to 0.1):
+ Stochastic depth rate per sample (when applied in the main path of residual layers).
+ use_mean_pooling (`bool`, *optional*, defaults to `True`):
+ Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
+ CLS token, before applying the classification head.
+ pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
+ Pooling scales used in Pooling Pyramid Module applied on the last feature map.
+ use_auxiliary_head (`bool`, *optional*, defaults to `True`):
+ Whether to use an auxiliary head during training.
+ auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
+ Weight of the cross-entropy loss of the auxiliary head.
+ auxiliary_channels (`int`, *optional*, defaults to 256):
+ Number of channels to use in the auxiliary head.
+ auxiliary_num_convs (`int`, *optional*, defaults to 1):
+ Number of convolutional layers to use in the auxiliary head.
+ auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
+ Whether to concatenate the output of the auxiliary head with the input before the classification layer.
+ semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
+ The index that is ignored by the loss function of the semantic segmentation model.
+ out_features (`List[str]`, *optional*):
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ out_indices (`List[int]`, *optional*):
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ add_fpn (`bool`, *optional*, defaults to `False`):
+ Whether to add a FPN as part of the backbone. Only relevant for [`BeitBackbone`].
+ reshape_hidden_states (`bool`, *optional*, defaults to `True`):
+ Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
+ case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
+ seq_len, hidden_size)`. Only relevant for [`BeitBackbone`].
+
+ Example:
+
+ ```python
+ >>> from transformers import BeitConfig, BeitModel
+
+ >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration
+ >>> configuration = BeitConfig()
+
+ >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration
+ >>> model = BeitModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "beit"
+
+ def __init__(
+ self,
+ vocab_size=8192,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ use_mask_token=False,
+ use_absolute_position_embeddings=False,
+ use_relative_position_bias=False,
+ use_shared_relative_position_bias=False,
+ layer_scale_init_value=0.1,
+ drop_path_rate=0.1,
+ use_mean_pooling=True,
+ pool_scales=[1, 2, 3, 6],
+ use_auxiliary_head=True,
+ auxiliary_loss_weight=0.4,
+ auxiliary_channels=256,
+ auxiliary_num_convs=1,
+ auxiliary_concat_input=False,
+ semantic_loss_ignore_index=255,
+ out_features=None,
+ out_indices=None,
+ add_fpn=False,
+ reshape_hidden_states=True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.use_mask_token = use_mask_token
+ self.use_absolute_position_embeddings = use_absolute_position_embeddings
+ self.use_relative_position_bias = use_relative_position_bias
+ self.use_shared_relative_position_bias = use_shared_relative_position_bias
+ self.layer_scale_init_value = layer_scale_init_value
+ self.drop_path_rate = drop_path_rate
+ self.use_mean_pooling = use_mean_pooling
+ # decode head attributes (semantic segmentation)
+ self.pool_scales = pool_scales
+ # auxiliary head attributes (semantic segmentation)
+ self.use_auxiliary_head = use_auxiliary_head
+ self.auxiliary_loss_weight = auxiliary_loss_weight
+ self.auxiliary_channels = auxiliary_channels
+ self.auxiliary_num_convs = auxiliary_num_convs
+ self.auxiliary_concat_input = auxiliary_concat_input
+ self.semantic_loss_ignore_index = semantic_loss_ignore_index
+
+ # handle backwards compatibility
+ if "segmentation_indices" in kwargs:
+ logger.warning(
+ "The `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead."
+ )
+ out_indices = kwargs.pop("segmentation_indices")
+
+ # backbone attributes
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)]
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+ )
+ self.add_fpn = add_fpn
+ self.reshape_hidden_states = reshape_hidden_states
+
+
+# Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig
+class BeitOnnxConfig(OnnxConfig):
+ torch_onnx_minimum_version = version.parse("1.11")
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
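A short sketch of the backbone arguments documented in `BeitConfig` (`out_features`/`out_indices` are aligned against `stage_names`) and of the ONNX metadata exposed by `BeitOnnxConfig`. The printed values are indicative and assume the default 12-layer configuration:

    from transformers.models.beit import BeitConfig, BeitOnnxConfig

    # Backbone usage: indices are resolved against ["stem", "stage1", ..., "stage12"].
    config = BeitConfig(out_indices=[3, 7, 11])
    print(config.stage_names[:4])  # ['stem', 'stage1', 'stage2', 'stage3']
    print(config.out_features)     # ['stage3', 'stage7', 'stage11']

    # ONNX export metadata: dynamic axes for the single pixel_values input.
    onnx_config = BeitOnnxConfig(config)
    print(onnx_config.inputs)      # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
    print(onnx_config.atol_for_validation)  # 0.0001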
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..757113c8a60fcca061c256ed659a46f700ced08f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/convert_beit_unilm_to_pytorch.py
@@ -0,0 +1,374 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert BEiT checkpoints from the unilm repository."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from datasets import load_dataset
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import (
+ BeitConfig,
+ BeitForImageClassification,
+ BeitForMaskedImageModeling,
+ BeitForSemanticSegmentation,
+ BeitImageProcessor,
+)
+from transformers.image_utils import PILImageResampling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config, has_lm_head=False, is_semantic=False):
+ prefix = "backbone." if is_semantic else ""
+
+ rename_keys = []
+ for i in range(config.num_hidden_layers):
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
+ rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
+ rename_keys.append(
+ (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
+ )
+ rename_keys.append(
+ (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
+ )
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
+ rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
+ rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
+
+ # projection layer + position embeddings
+ rename_keys.extend(
+ [
+ (f"{prefix}cls_token", "beit.embeddings.cls_token"),
+ (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
+ (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
+ ]
+ )
+
+ if has_lm_head:
+ # mask token + shared relative position bias + layernorm
+ rename_keys.extend(
+ [
+ ("mask_token", "beit.embeddings.mask_token"),
+ (
+ "rel_pos_bias.relative_position_bias_table",
+ "beit.encoder.relative_position_bias.relative_position_bias_table",
+ ),
+ (
+ "rel_pos_bias.relative_position_index",
+ "beit.encoder.relative_position_bias.relative_position_index",
+ ),
+ ("norm.weight", "layernorm.weight"),
+ ("norm.bias", "layernorm.bias"),
+ ]
+ )
+ elif is_semantic:
+ # semantic segmentation classification heads
+ rename_keys.extend(
+ [
+ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
+ ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
+ ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
+ ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
+ ]
+ )
+ else:
+ # layernorm + classification head
+ rename_keys.extend(
+ [
+ ("fc_norm.weight", "beit.pooler.layernorm.weight"),
+ ("fc_norm.bias", "beit.pooler.layernorm.bias"),
+ ("head.weight", "classifier.weight"),
+ ("head.bias", "classifier.bias"),
+ ]
+ )
+
+ return rename_keys
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
+ for i in range(config.num_hidden_layers):
+ prefix = "backbone." if is_semantic else ""
+ # queries, keys and values
+ in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
+ q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
+ v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
+
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+ : config.hidden_size, :
+ ]
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+ config.hidden_size : config.hidden_size * 2, :
+ ]
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+ -config.hidden_size :, :
+ ]
+ state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
+
+ # gamma_1 and gamma_2
+ # we call them lambda because otherwise they are renamed when using .from_pretrained
+ gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
+ gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
+
+ state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
+ state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
+
+ # relative_position bias table + index
+ if not has_lm_head:
+ # each layer has its own relative position bias
+ table = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_bias_table")
+ index = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_index")
+
+ state_dict[
+ f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table"
+ ] = table
+ state_dict[
+ f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index"
+ ] = index
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+@torch.no_grad()
+def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path):
+ """
+ Copy/paste/tweak model's weights to our BEiT structure.
+ """
+
+ # define default BEiT configuration
+ config = BeitConfig()
+ has_lm_head = False
+ is_semantic = False
+ repo_id = "huggingface/label-files"
+ # set config parameters based on URL
+ if checkpoint_url[-9:-4] == "pt22k":
+ # masked image modeling
+ config.use_shared_relative_position_bias = True
+ config.use_mask_token = True
+ has_lm_head = True
+ elif checkpoint_url[-9:-4] == "ft22k":
+ # intermediate fine-tuning on ImageNet-22k
+ config.use_relative_position_bias = True
+ config.num_labels = 21841
+ filename = "imagenet-22k-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ # this dataset contains 21843 labels but the model only has 21841
+ # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
+ del id2label[9205]
+ del id2label[15027]
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ elif checkpoint_url[-8:-4] == "to1k":
+ # fine-tuning on ImageNet-1k
+ config.use_relative_position_bias = True
+ config.num_labels = 1000
+ filename = "imagenet-1k-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ if "384" in checkpoint_url:
+ config.image_size = 384
+ if "512" in checkpoint_url:
+ config.image_size = 512
+ elif "ade20k" in checkpoint_url:
+ # fine-tuning on ADE20k (semantic segmentation)
+ config.use_relative_position_bias = True
+ config.num_labels = 150
+ filename = "ade20k-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ config.image_size = 640
+ is_semantic = True
+ else:
+ raise ValueError("Checkpoint not supported, URL should either end with 'pt22k', 'ft22k', 'to1k' or 'ade20k'")
+
+ # size of the architecture
+ if "base" in checkpoint_url:
+ pass
+ elif "large" in checkpoint_url:
+ config.hidden_size = 1024
+ config.intermediate_size = 4096
+ config.num_hidden_layers = 24
+ config.num_attention_heads = 16
+ if "ade20k" in checkpoint_url:
+ config.image_size = 640
+ config.out_indices = [7, 11, 15, 23]
+ else:
+ raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
+
+ # load state_dict of original model, remove and rename some keys
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)
+ state_dict = state_dict["model"] if "ade20k" not in checkpoint_url else state_dict["state_dict"]
+
+ rename_keys = create_rename_keys(config, has_lm_head=has_lm_head, is_semantic=is_semantic)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head, is_semantic=is_semantic)
+ if is_semantic:
+ # add prefix to decoder keys
+ for key, val in state_dict.copy().items():
+ val = state_dict.pop(key)
+ if key.startswith("backbone.fpn"):
+ key = key.replace("backbone.fpn", "fpn")
+ state_dict[key] = val
+
+ # load HuggingFace model
+ if checkpoint_url[-9:-4] == "pt22k":
+ model = BeitForMaskedImageModeling(config)
+ elif "ade20k" in checkpoint_url:
+ model = BeitForSemanticSegmentation(config)
+ else:
+ model = BeitForImageClassification(config)
+ model.eval()
+ model.load_state_dict(state_dict)
+
+ # Check outputs on an image
+ if is_semantic:
+ image_processor = BeitImageProcessor(size=config.image_size, do_center_crop=False)
+ ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
+ image = Image.open(ds[0]["file"])
+ else:
+ image_processor = BeitImageProcessor(
+ size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
+ )
+ image = prepare_img()
+
+ encoding = image_processor(images=image, return_tensors="pt")
+ pixel_values = encoding["pixel_values"]
+
+ outputs = model(pixel_values)
+ logits = outputs.logits
+
+ # verify logits
+ expected_shape = torch.Size([1, 1000])
+ if checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k"):
+ expected_shape = torch.Size([1, 196, 8192])
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k"):
+ expected_shape = torch.Size([1, 196, 8192])
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22k"):
+ expected_shape = torch.Size([1, 21841])
+ expected_logits = torch.tensor([2.2288, 2.4671, 0.7395])
+ expected_class_idx = 2397
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22k"):
+ expected_shape = torch.Size([1, 21841])
+ expected_logits = torch.tensor([1.6881, -0.2787, 0.5901])
+ expected_class_idx = 2396
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft1k"):
+ expected_logits = torch.tensor([0.1241, 0.0798, -0.6569])
+ expected_class_idx = 285
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22kto1k"):
+ expected_logits = torch.tensor([-1.2385, -1.0987, -1.0108])
+ expected_class_idx = 281
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_384_pt22k_ft22kto1k"):
+ expected_logits = torch.tensor([-1.5303, -0.9484, -0.3147])
+ expected_class_idx = 761
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft1k"):
+ expected_logits = torch.tensor([0.4610, -0.0928, 0.2086])
+ expected_class_idx = 761
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22kto1k"):
+ expected_logits = torch.tensor([-0.4804, 0.6257, -0.1837])
+ expected_class_idx = 761
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_384_pt22k_ft22kto1k"):
+ expected_logits = torch.tensor([[-0.5122, 0.5117, -0.2113]])
+ expected_class_idx = 761
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_512_pt22k_ft22kto1k"):
+ expected_logits = torch.tensor([-0.3062, 0.7261, 0.4852])
+ expected_class_idx = 761
+ elif checkpoint_url[:-4].endswith("beit_base_patch16_640_pt22k_ft22ktoade20k"):
+ expected_shape = (1, 150, 160, 160)
+ expected_logits = torch.tensor(
+ [
+ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
+ [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
+ [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
+ ]
+ )
+ elif checkpoint_url[:-4].endswith("beit_large_patch16_640_pt22k_ft22ktoade20k"):
+ expected_shape = (1, 150, 160, 160)
+ expected_logits = torch.tensor(
+ [
+ [[-4.3305, -2.3049, -3.0161], [-2.9591, -1.5305, -2.2251], [-3.4198, -1.8004, -2.9062]],
+ [[-5.8922, -3.7435, -4.3978], [-4.2063, -2.7872, -3.4755], [-4.2791, -3.1874, -4.1681]],
+ [[0.9895, 4.3467, 4.7663], [4.2476, 5.6830, 6.1518], [4.5550, 6.2495, 6.5154]],
+ ]
+ )
+ else:
+ raise ValueError("Can't verify logits as model is not supported")
+
+ if logits.shape != expected_shape:
+ raise ValueError(f"Shape of logits not as expected. {logits.shape=}, {expected_shape=}")
+ if not has_lm_head:
+ if is_semantic:
+ if not torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-3):
+ raise ValueError("First elements of logits not as expected")
+ else:
+ print("Predicted class idx:", logits.argmax(-1).item())
+
+ if not torch.allclose(logits[0, :3], expected_logits, atol=1e-3):
+ raise ValueError("First elements of logits not as expected")
+ if logits.argmax(-1).item() != expected_class_idx:
+ raise ValueError("Predicted class index not as expected")
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth",
+ type=str,
+ help="URL to the original PyTorch checkpoint (.pth file).",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_beit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
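The conversion entry point can also be called directly instead of through the CLI. A hypothetical invocation using the parser's default checkpoint URL (requires network access and downloads a checkpoint of several hundred megabytes; the output directory name is illustrative):

    from transformers.models.beit.convert_beit_unilm_to_pytorch import convert_beit_checkpoint

    convert_beit_checkpoint(
        checkpoint_url=(
            "https://conversationhub.blob.core.windows.net/beit-share-public/beit/"
            "beit_base_patch16_224_pt22k_ft22kto1k.pth"
        ),
        pytorch_dump_folder_path="./beit-base-patch16-224",  # hypothetical output folder
    )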
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/feature_extraction_beit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/feature_extraction_beit.py
new file mode 100644
index 0000000000000000000000000000000000000000..59dacb4ae51f6e314b96ca8c0e8c368e689c1aa7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/feature_extraction_beit.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for BEiT."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_beit import BeitImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class BeitFeatureExtractor(BeitImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+ " use BeitImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
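`BeitFeatureExtractor` is only a deprecation shim over `BeitImageProcessor`: constructing it emits a `FutureWarning` but otherwise behaves identically. A small sketch, assuming the vision extra is installed:

    import warnings

    from transformers import BeitFeatureExtractor, BeitImageProcessor

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        legacy = BeitFeatureExtractor()  # still works, but warns about the deprecation
    assert any(issubclass(w.category, FutureWarning) for w in caught)

    processor = BeitImageProcessor()     # preferred replacement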
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/image_processing_beit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/image_processing_beit.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e15fe645cf9d9118a59e54a1b29c9e86d520765
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/image_processing_beit.py
@@ -0,0 +1,531 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Beit."""
+
+import warnings
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import resize, to_channel_dimension_format
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+if is_torch_available():
+ import torch
+
+
+logger = logging.get_logger(__name__)
+
+
+class BeitImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a BEiT image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
+ is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
+ `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
+ Can be overridden by the `crop_size` parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ The mean to use if normalizing the image. This is a float or list of floats of length of the number of
+ channels of the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ The standard deviation to use if normalizing the image. This is a float or list of floats of length of the
+ number of channels of the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_reduce_labels (`bool`, *optional*, defaults to `False`):
+ Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
+ used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
+ background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
+ `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_rescale: bool = True,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_reduce_labels: bool = False,
+ **kwargs,
+ ) -> None:
+ if "reduce_labels" in kwargs:
+ warnings.warn(
+ "The `reduce_labels` parameter is deprecated and will be removed in a future version. Please use"
+ " `do_reduce_labels` instead.",
+ FutureWarning,
+ )
+ do_reduce_labels = kwargs.pop("reduce_labels")
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 256, "width": 256}
+ size = get_size_dict(size)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self.do_reduce_labels = do_reduce_labels
+ self._valid_processor_keys = [
+ "images",
+ "segmentation_maps",
+ "do_resize",
+ "size",
+ "resample",
+ "do_center_crop",
+ "crop_size",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "do_reduce_labels",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ @classmethod
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
+ """
+ Overrides the `from_dict` method from the base class to make sure `reduce_labels` is updated if the image processor
+ is created using from_dict and kwargs, e.g. `BeitImageProcessor.from_pretrained(checkpoint, reduce_labels=True)`.
+ """
+ image_processor_dict = image_processor_dict.copy()
+ if "reduce_labels" in kwargs:
+ image_processor_dict["reduce_labels"] = kwargs.pop("reduce_labels")
+ return super().from_dict(image_processor_dict, **kwargs)
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to (size["height"], size["width"]).
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PIL.Image.BICUBIC`):
+ Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ size = get_size_dict(size, default_to_square=True, param_name="size")
+ if "height" not in size or "width" not in size:
+ raise ValueError(f"The `size` argument must contain `height` and `width` keys. Got {size.keys()}")
+ return resize(
+ image,
+ size=(size["height"], size["width"]),
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def reduce_label(self, label: ImageInput) -> np.ndarray:
+ label = to_numpy_array(label)
+ # Avoid using underflow conversion
+ label[label == 0] = 255
+ label = label - 1
+ label[label == 254] = 255
+ return label
+
+ def _preprocess(
+ self,
+ image: ImageInput,
+ do_reduce_labels: bool = None,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ if do_reduce_labels:
+ image = self.reduce_label(image)
+
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+ if do_center_crop:
+ image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+
+ return image
+
+ def _preprocess_image(
+ self,
+ image: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """Preprocesses a single image."""
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+ if is_scaled_image(image) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+ image = self._preprocess(
+ image,
+ do_reduce_labels=False,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ input_data_format=input_data_format,
+ )
+ if data_format is not None:
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ return image
+
+ def _preprocess_segmentation_map(
+ self,
+ segmentation_map: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_reduce_labels: bool = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ):
+ """Preprocesses a single segmentation map."""
+ # All transformations expect numpy arrays.
+ segmentation_map = to_numpy_array(segmentation_map)
+ # Add an axis to the segmentation maps for transformations.
+ if segmentation_map.ndim == 2:
+ segmentation_map = segmentation_map[None, ...]
+ added_dimension = True
+ input_data_format = ChannelDimension.FIRST
+ else:
+ added_dimension = False
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
+ segmentation_map = self._preprocess(
+ image=segmentation_map,
+ do_reduce_labels=do_reduce_labels,
+ do_resize=do_resize,
+ resample=resample,
+ size=size,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_normalize=False,
+ do_rescale=False,
+ input_data_format=ChannelDimension.FIRST,
+ )
+ # Remove extra axis if added
+ if added_dimension:
+ segmentation_map = np.squeeze(segmentation_map, axis=0)
+ segmentation_map = segmentation_map.astype(np.int64)
+ return segmentation_map
+
+ def __call__(self, images, segmentation_maps=None, **kwargs):
+ # Overrides the `__call__` method of the `Preprocessor` class such that the images and segmentation maps can both
+ # be passed in as positional arguments.
+ return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ segmentation_maps: Optional[ImageInput] = None,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_reduce_labels: Optional[bool] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> PIL.Image.Image:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ segmentation_maps (`ImageInput`, *optional*):
+ Segmentation maps to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
+ padded with zeros and then cropped.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values between [0 - 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+ do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
+ Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
+ is used for background, and background itself is not included in all classes of a dataset (e.g.
+ ADE20k). The background label will be replaced by 255.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=True, param_name="size")
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ images = make_list_of_images(images)
+
+ if segmentation_maps is not None:
+ segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)
+
+ if segmentation_maps is not None and not valid_images(segmentation_maps):
+ raise ValueError(
+ "Invalid segmentation_maps type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ images = [
+ self._preprocess_image(
+ image=img,
+ do_resize=do_resize,
+ do_center_crop=do_center_crop,
+ do_rescale=do_rescale,
+ do_normalize=do_normalize,
+ resample=resample,
+ size=size,
+ rescale_factor=rescale_factor,
+ crop_size=crop_size,
+ image_mean=image_mean,
+ image_std=image_std,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for img in images
+ ]
+
+ data = {"pixel_values": images}
+
+ if segmentation_maps is not None:
+ segmentation_maps = [
+ self._preprocess_segmentation_map(
+ segmentation_map=segmentation_map,
+ do_reduce_labels=do_reduce_labels,
+ do_resize=do_resize,
+ resample=resample,
+ size=size,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ )
+ for segmentation_map in segmentation_maps
+ ]
+ data["labels"] = segmentation_maps
+
+ return BatchFeature(data=data, tensor_type=return_tensors)
+
+ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
+ """
+ Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
+
+ Args:
+ outputs ([`BeitForSemanticSegmentation`]):
+ Raw outputs of the model.
+ target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
+ List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
+ predictions will not be resized.
+
+ Returns:
+ semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
+ segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
+ specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
+ """
+ # TODO: add support for other frameworks
+ logits = outputs.logits
+
+ # Resize logits and compute semantic segmentation maps
+ if target_sizes is not None:
+ if len(logits) != len(target_sizes):
+ raise ValueError(
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
+ )
+
+ if is_torch_tensor(target_sizes):
+ target_sizes = target_sizes.numpy()
+
+ semantic_segmentation = []
+
+ for idx in range(len(logits)):
+ resized_logits = torch.nn.functional.interpolate(
+ logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
+ )
+ semantic_map = resized_logits[0].argmax(dim=0)
+ semantic_segmentation.append(semantic_map)
+ else:
+ semantic_segmentation = logits.argmax(dim=1)
+ semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
+
+ return semantic_segmentation
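A hedged end-to-end sketch of the preprocessing and post-processing path for semantic segmentation. The checkpoint name and the local image path are illustrative assumptions, not values taken from this file:

    import torch
    from PIL import Image

    from transformers import BeitForSemanticSegmentation, BeitImageProcessor

    processor = BeitImageProcessor(do_reduce_labels=True)
    model = BeitForSemanticSegmentation.from_pretrained(
        "microsoft/beit-base-finetuned-ade-640-640"  # assumed checkpoint identifier
    )
    model.eval()

    image = Image.open("scene.jpg")  # hypothetical local image
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Upsample the logits to the original (height, width) and take the per-pixel argmax.
    segmentation = processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]  # PIL size is (width, height)
    )[0]
    print(segmentation.shape)  # (height, width) map of class ids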
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_beit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_beit.py
new file mode 100644
index 0000000000000000000000000000000000000000..d04717039ec90967fe326e25a88e2faa2a883fe2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_beit.py
@@ -0,0 +1,1425 @@
+# coding=utf-8
+# Copyright 2021 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch BEiT model."""
+
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BackboneOutput,
+ BaseModelOutput,
+ BaseModelOutputWithPooling,
+ ImageClassifierOutput,
+ MaskedLMOutput,
+ SemanticSegmenterOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.backbone_utils import BackboneMixin
+from .configuration_beit import BeitConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "BeitConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "microsoft/beit-base-patch16-224-pt22k"
+_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "microsoft/beit-base-patch16-224"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+
+from ..deprecated._archive_maps import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class BeitModelOutputWithPooling(BaseModelOutputWithPooling):
+ """
+ Class for outputs of [`BeitModel`].
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
+ Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
+ *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
+ will be returned.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
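+
+# Illustrative sketch: with `drop_prob=0.5` and `training=True`, each sample's residual branch is either
+# zeroed out or kept and rescaled by `1 / keep_prob`, so the expected value is preserved, e.g.
+#
+#   x = torch.ones(4, 3, 8)
+#   y = drop_path(x, drop_prob=0.5, training=True)
+#   # every y[i] is now either all zeros or all 2.0 (= 1 / 0.5)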
+
+
+class BeitDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+# Based on timm implementation, which can be found here:
+# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
+class BeitEmbeddings(nn.Module):
+ """
+ Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
+
+ """
+
+ def __init__(self, config: BeitConfig) -> None:
+ super().__init__()
+
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ if config.use_mask_token:
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ else:
+ self.mask_token = None
+ self.patch_embeddings = BeitPatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ if config.use_absolute_position_embeddings:
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
+ else:
+ self.position_embeddings = None
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None) -> torch.Tensor:
+ embeddings, (patch_height, patch_width) = self.patch_embeddings(
+ pixel_values, self.position_embeddings[:, 1:, :] if self.position_embeddings is not None else None
+ )
+ batch_size, seq_len, _ = embeddings.size()
+
+ if bool_masked_pos is not None:
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
+ # replace the masked visual tokens by mask_tokens
+ w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
+ embeddings = embeddings * (1 - w) + mask_tokens * w
+
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+ if self.position_embeddings is not None:
+ cls_tokens = cls_tokens + self.position_embeddings[:, :1, :]
+
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings, (patch_height, patch_width)
+
+
+class BeitPatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+ self.patch_shape = patch_shape
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor, position_embedding: Optional[torch.Tensor] = None) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+
+ embeddings = self.projection(pixel_values)
+ patch_height, patch_width = embeddings.shape[2], embeddings.shape[3]
+
+ if position_embedding is not None:
+ # interpolate the position embedding to the corresponding size
+ position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(
+ 0, 3, 1, 2
+ )
+ position_embedding = nn.functional.interpolate(
+ position_embedding, size=(patch_height, patch_width), mode="bicubic"
+ )
+ embeddings = embeddings + position_embedding
+
+ embeddings = embeddings.flatten(2).transpose(1, 2)
+
+ return embeddings, (patch_height, patch_width)
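+
+# Shape sketch (assuming the default base configuration: 224x224 images, 16x16 patches, hidden size 768):
+# a (1, 3, 224, 224) `pixel_values` tensor is projected by the Conv2d to (1, 768, 14, 14), then flattened
+# and transposed to patch embeddings of shape (1, 196, 768). BeitEmbeddings prepends the CLS token, giving
+# the (1, 197, 768) sequence noted in _EXPECTED_OUTPUT_SHAPE above.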
+
+
+class BeitSelfAttention(nn.Module):
+ def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ if window_size:
+ self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
+ else:
+ self.relative_position_bias = None
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ relative_position_bias: Optional["BeitRelativePositionBias"] = None,
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Add relative position bias if present.
+ if self.relative_position_bias is not None:
+ attention_scores = attention_scores + self.relative_position_bias().unsqueeze(0)
+
+ # Add shared relative position bias if provided.
+ if relative_position_bias is not None:
+ attention_scores = attention_scores + relative_position_bias
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+class BeitSelfOutput(nn.Module):
+ """
+ The residual connection is defined in BeitLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: BeitConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class BeitAttention(nn.Module):
+ def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
+ super().__init__()
+ self.attention = BeitSelfAttention(config, window_size=window_size)
+ self.output = BeitSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ relative_position_bias: Optional["BeitRelativePositionBias"] = None,
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions, relative_position_bias)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class BeitIntermediate(nn.Module):
+ def __init__(self, config: BeitConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+
+class BeitOutput(nn.Module):
+ def __init__(self, config: BeitConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class BeitLayer(nn.Module):
+ """This corresponds to the Block class in the timm implementation."""
+
+ def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0) -> None:
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = BeitAttention(config, window_size=window_size)
+ self.intermediate = BeitIntermediate(config)
+ self.output = BeitOutput(config)
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.drop_path = BeitDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ init_values = config.layer_scale_init_value
+ if init_values > 0:
+ self.lambda_1 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
+ self.lambda_2 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
+ else:
+ self.lambda_1, self.lambda_2 = None, None
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ relative_position_bias: Optional["BeitRelativePositionBias"] = None,
+ ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
+ self_attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), # in BEiT, layernorm is applied before self-attention
+ head_mask,
+ output_attentions=output_attentions,
+ relative_position_bias=relative_position_bias,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # apply lambda_1 if present
+ if self.lambda_1 is not None:
+ attention_output = self.lambda_1 * attention_output
+
+ # first residual connection
+ hidden_states = self.drop_path(attention_output) + hidden_states
+
+ # in BEiT, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(hidden_states)
+
+ layer_output = self.intermediate(layer_output)
+ layer_output = self.output(layer_output)
+
+ if self.lambda_2 is not None:
+ layer_output = self.lambda_2 * layer_output
+
+ # second residual connection
+ layer_output = self.drop_path(layer_output) + hidden_states
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
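+
+# Structure sketch: each BeitLayer computes x = x + drop_path(lambda_1 * Attn(LN(x))) followed by
+# x = x + drop_path(lambda_2 * MLP(LN(x))), i.e. a pre-norm transformer block with optional
+# LayerScale scaling (lambda_1 / lambda_2) and stochastic depth on both residual branches.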
+
+
+class BeitRelativePositionBias(nn.Module):
+ def __init__(self, config: BeitConfig, window_size: tuple) -> None:
+ super().__init__()
+ self.window_size = window_size
+ self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
+ self.relative_position_bias_table = nn.Parameter(
+ torch.zeros(self.num_relative_distance, config.num_attention_heads)
+ ) # 2*Wh-1 * 2*Ww-1, nH
+ # cls to token & token 2 cls & cls to cls
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(window_size[0])
+ coords_w = torch.arange(window_size[1])
+ coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) # 2, Wh, Ww
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
+ relative_coords[:, :, 1] += window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
+ relative_position_index = torch.zeros(
+ size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
+ )
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
+ relative_position_index[0, 0:] = self.num_relative_distance - 3
+ relative_position_index[0:, 0] = self.num_relative_distance - 2
+ relative_position_index[0, 0] = self.num_relative_distance - 1
+
+ self.register_buffer("relative_position_index", relative_position_index, persistent=False)
+
+ def forward(self) -> torch.Tensor:
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+ self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1
+ ) # Wh*Ww,Wh*Ww,nH
+
+ return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
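+
+# Worked example (assuming the default 14x14 patch grid): num_relative_distance is
+# (2*14 - 1) * (2*14 - 1) + 3 = 732, i.e. 729 in-window relative offsets plus the three special
+# cls-to-token, token-to-cls and cls-to-cls entries, and forward() returns a bias of shape
+# (num_attention_heads, 197, 197) that is added to the attention scores in BeitSelfAttention.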
+
+
+class BeitEncoder(nn.Module):
+ def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
+ super().__init__()
+ self.config = config
+ if config.use_shared_relative_position_bias:
+ self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
+ else:
+ self.relative_position_bias = None
+
+ # stochastic depth decay rule
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
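+ # e.g. with drop_path_rate=0.1 and 12 hidden layers, dpr increases linearly from 0.0 to 0.1,
+ # so deeper layers are dropped with higher probability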
+ self.layer = nn.ModuleList(
+ [
+ BeitLayer(
+ config,
+ window_size=window_size if config.use_relative_position_bias else None,
+ drop_path_rate=dpr[i],
+ )
+ for i in range(config.num_hidden_layers)
+ ]
+ )
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ relative_position_bias = (
+ self.relative_position_bias() if self.relative_position_bias is not None else None
+ )
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class BeitPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BeitConfig
+ base_model_prefix = "beit"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+BEIT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BEIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`BeitImageProcessor.__call__`] for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Beit Model transformer outputting raw hidden-states without any specific head on top.",
+ BEIT_START_DOCSTRING,
+)
+class BeitModel(BeitPreTrainedModel):
+ def __init__(self, config: BeitConfig, add_pooling_layer: bool = True) -> None:
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = BeitEmbeddings(config)
+ self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
+
+ self.layernorm = (
+ nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ )
+ self.pooler = BeitPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class `PreTrainedModel` for details.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BeitModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, BeitModelOutputWithPooling]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values, bool_masked_pos)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
+ return head_outputs + encoder_outputs[1:]
+
+ return BeitModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class BeitPooler(nn.Module):
+ def __init__(self, config: BeitConfig) -> None:
+ super().__init__()
+ self.layernorm = (
+ nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None
+ )
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ if self.layernorm is not None:
+ # Mean pool the final hidden states of the patch tokens
+ patch_tokens = hidden_states[:, 1:, :]
+ pooled_output = self.layernorm(patch_tokens.mean(1))
+ else:
+ # Pool by simply taking the final hidden state of the [CLS] token
+ pooled_output = hidden_states[:, 0]
+
+ return pooled_output
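+
+# Illustrative note: with config.use_mean_pooling=True, pooled_output is the layer-normalized mean over
+# the patch tokens of the (batch_size, seq_len, hidden_size) sequence (the CLS token at index 0 is
+# excluded from the mean); with use_mean_pooling=False it is simply the final hidden state of the CLS token.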
+
+
+@add_start_docstrings(
+ """Beit Model transformer with a 'language' modeling head on top. BEiT does masked image modeling by predicting
+ visual tokens of a Vector-Quantized Variational Autoencoder (VQ-VAE), whereas other vision models like ViT and DeiT
+ predict RGB pixel values. As a result, this class is incompatible with [`AutoModelForMaskedImageModeling`], so you
+ will need to use [`BeitForMaskedImageModeling`] directly if you wish to do masked image modeling with BEiT.""",
+ BEIT_START_DOCSTRING,
+)
+class BeitForMaskedImageModeling(BeitPreTrainedModel):
+ def __init__(self, config: BeitConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.beit = BeitModel(config, add_pooling_layer=False)
+
+ # Classifier head
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, MaskedLMOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+
+ labels (`torch.LongTensor`, *optional*):
+ Target visual token indices for the masked patches, used to compute the masked image modeling
+ (cross-entropy) loss. Indices should be in `[0, ..., config.vocab_size - 1]`, and only positions
+ where `bool_masked_pos` is True contribute to the loss.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, BeitForMaskedImageModeling
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
+ >>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
+
+ >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
+ >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
+ >>> # create random boolean mask of shape (batch_size, num_patches)
+ >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
+
+ >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
+ >>> loss, logits = outputs.loss, outputs.logits
+ >>> list(logits.shape)
+ [1, 196, 8192]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.beit(
+ pixel_values,
+ bool_masked_pos=bool_masked_pos,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+ prediction_scores = self.lm_head(sequence_output[:, 1:])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores[bool_masked_pos], labels)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
+ hidden states of the patch tokens) e.g. for ImageNet.
+ """,
+ BEIT_START_DOCSTRING,
+)
+class BeitForImageClassification(BeitPreTrainedModel):
+ def __init__(self, config: BeitConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.beit = BeitModel(config, add_pooling_layer=True)
+
+ # Classifier head
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ outputs = self.beit(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class BeitConvModule(nn.Module):
+ """
+ A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
+ layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
+
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: Union[int, Tuple[int, int]],
+ padding: Union[int, Tuple[int, int], str] = 0,
+ bias: bool = False,
+ dilation: Union[int, Tuple[int, int]] = 1,
+ ) -> None:
+ super().__init__()
+ self.conv = nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ padding=padding,
+ bias=bias,
+ dilation=dilation,
+ )
+ self.bn = nn.BatchNorm2d(out_channels)
+ self.activation = nn.ReLU()
+
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
+ output = self.conv(input)
+ output = self.bn(output)
+ output = self.activation(output)
+
+ return output
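+
+# Illustrative sketch: BeitConvModule(768, 768, kernel_size=3, padding=1) applies a 3x3 convolution
+# followed by BatchNorm2d and ReLU, keeping the spatial resolution of the input feature map unchanged.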
+
+
+class BeitPyramidPoolingBlock(nn.Module):
+ def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
+ super().__init__()
+ self.layers = [
+ nn.AdaptiveAvgPool2d(pool_scale),
+ BeitConvModule(in_channels, channels, kernel_size=1),
+ ]
+ for i, layer in enumerate(self.layers):
+ self.add_module(str(i), layer)
+
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
+ hidden_state = input
+ for layer in self.layers:
+ hidden_state = layer(hidden_state)
+ return hidden_state
+
+
+class BeitPyramidPoolingModule(nn.Module):
+ """
+ Pyramid Pooling Module (PPM) used in PSPNet.
+
+ Args:
+ pool_scales (tuple[int]): Pooling scales used in the Pyramid Pooling
+ Module (PPM).
+ in_channels (int): Input channels.
+ channels (int): Channels after modules, before conv_seg.
+ align_corners (bool): align_corners argument of F.interpolate.
+
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
+ """
+
+ def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
+ super().__init__()
+ self.pool_scales = pool_scales
+ self.align_corners = align_corners
+ self.in_channels = in_channels
+ self.channels = channels
+ self.blocks = []
+ for i, pool_scale in enumerate(pool_scales):
+ block = BeitPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
+ self.blocks.append(block)
+ self.add_module(str(i), block)
+
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
+ ppm_outs = []
+ for ppm in self.blocks:
+ ppm_out = ppm(x)
+ upsampled_ppm_out = nn.functional.interpolate(
+ ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
+ )
+ ppm_outs.append(upsampled_ppm_out)
+ return ppm_outs
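+
+# Illustrative sketch: with pool_scales=(1, 2, 3, 6) and an input of shape (batch, in_channels, H, W),
+# each block adaptively pools to 1x1, 2x2, 3x3 and 6x6, projects to `channels` with a 1x1 BeitConvModule,
+# and is bilinearly upsampled back to (batch, channels, H, W), so forward() returns a list of four
+# feature maps aligned with the input resolution.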
+
+
+class BeitUperHead(nn.Module):
+ """
+ Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
+ [UPerNet](https://arxiv.org/abs/1807.10221).
+
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
+ """
+
+ def __init__(self, config: BeitConfig) -> None:
+ super().__init__()
+
+ self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
+ self.in_channels = [config.hidden_size] * 4 # e.g. [768, 768, 768, 768]
+ self.channels = config.hidden_size
+ self.align_corners = False
+ self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
+
+ # PSP Module
+ self.psp_modules = BeitPyramidPoolingModule(
+ self.pool_scales,
+ self.in_channels[-1],
+ self.channels,
+ align_corners=self.align_corners,
+ )
+ self.bottleneck = BeitConvModule(
+ self.in_channels[-1] + len(self.pool_scales) * self.channels,
+ self.channels,
+ kernel_size=3,
+ padding=1,
+ )
+ # FPN Module
+ self.lateral_convs = nn.ModuleList()
+ self.fpn_convs = nn.ModuleList()
+ for in_channels in self.in_channels[:-1]: # skip the top layer
+ l_conv = BeitConvModule(in_channels, self.channels, kernel_size=1)
+ fpn_conv = BeitConvModule(self.channels, self.channels, kernel_size=3, padding=1)
+ self.lateral_convs.append(l_conv)
+ self.fpn_convs.append(fpn_conv)
+
+ self.fpn_bottleneck = BeitConvModule(
+ len(self.in_channels) * self.channels,
+ self.channels,
+ kernel_size=3,
+ padding=1,
+ )
+
+ def psp_forward(self, inputs):
+ x = inputs[-1]
+ psp_outs = [x]
+ psp_outs.extend(self.psp_modules(x))
+ psp_outs = torch.cat(psp_outs, dim=1)
+ output = self.bottleneck(psp_outs)
+
+ return output
+
+ def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
+ # build laterals
+ laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
+
+ laterals.append(self.psp_forward(encoder_hidden_states))
+
+ # build top-down path
+ used_backbone_levels = len(laterals)
+ for i in range(used_backbone_levels - 1, 0, -1):
+ prev_shape = laterals[i - 1].shape[2:]
+ laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
+ laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
+ )
+
+ # build outputs
+ fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
+ # append psp feature
+ fpn_outs.append(laterals[-1])
+
+ for i in range(used_backbone_levels - 1, 0, -1):
+ fpn_outs[i] = nn.functional.interpolate(
+ fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
+ )
+ fpn_outs = torch.cat(fpn_outs, dim=1)
+ output = self.fpn_bottleneck(fpn_outs)
+ output = self.classifier(output)
+
+ return output
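+
+# Flow sketch: the head builds lateral features for the four inputs, runs the PSP module on the last one,
+# fuses them top-down, upsamples everything to the highest-resolution lateral, concatenates and classifies.
+# The returned logits therefore share the spatial size of the largest input feature map (1/4 of the image
+# for the default 16x16-patch setup), which is why compute_loss and post-processing upsample them further.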
+
+
+class BeitFCNHead(nn.Module):
+ """
+ Fully Convolutional Networks for Semantic Segmentation. This head is the implementation of
+ [FCN](https://arxiv.org/abs/1411.4038).
+
+ Args:
+ config (BeitConfig): Configuration.
+ in_index (int): Index of the encoder hidden state to use as input. Default: 2.
+ kernel_size (int): The kernel size for convs in the head. Default: 3.
+ dilation (int): The dilation rate for convs in the head. Default: 1.
+
+
+ Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
+ """
+
+ def __init__(
+ self, config: BeitConfig, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
+ ) -> None:
+ super().__init__()
+ self.in_channels = config.hidden_size
+ self.channels = config.auxiliary_channels
+ self.num_convs = config.auxiliary_num_convs
+ self.concat_input = config.auxiliary_concat_input
+ self.in_index = in_index
+
+ conv_padding = (kernel_size // 2) * dilation
+ convs = []
+ convs.append(
+ BeitConvModule(
+ self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
+ )
+ )
+ for i in range(self.num_convs - 1):
+ convs.append(
+ BeitConvModule(
+ self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
+ )
+ )
+ if self.num_convs == 0:
+ self.convs = nn.Identity()
+ else:
+ self.convs = nn.Sequential(*convs)
+ if self.concat_input:
+ self.conv_cat = BeitConvModule(
+ self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
+ )
+
+ self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
+
+ def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
+ # just take the relevant feature maps
+ hidden_states = encoder_hidden_states[self.in_index]
+ output = self.convs(hidden_states)
+ if self.concat_input:
+ output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
+ output = self.classifier(output)
+ return output
+
+
+@add_start_docstrings(
+ """
+ Beit Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
+ """,
+ BEIT_START_DOCSTRING,
+)
+class BeitForSemanticSegmentation(BeitPreTrainedModel):
+ def __init__(self, config: BeitConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.beit = BeitModel(config, add_pooling_layer=False)
+
+ # FPNs
+ if len(self.config.out_indices) != 4:
+ raise ValueError(
+ "BeitForSemanticSegmentation requires config.out_indices to be a list of 4 integers, "
+ "specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
+ "a base-sized architecture."
+ )
+ self.fpn1 = nn.Sequential(
+ nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
+ nn.BatchNorm2d(config.hidden_size),
+ nn.GELU(),
+ nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
+ )
+ self.fpn2 = nn.Sequential(
+ nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
+ )
+ self.fpn3 = nn.Identity()
+ self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
+
+ # Semantic segmentation head(s)
+ self.decode_head = BeitUperHead(config)
+ self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def compute_loss(self, logits, auxiliary_logits, labels):
+ # upsample logits to the images' original size
+ upsampled_logits = nn.functional.interpolate(
+ logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
+ )
+ if auxiliary_logits is not None:
+ upsampled_auxiliary_logits = nn.functional.interpolate(
+ auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
+ )
+ # compute weighted loss
+ loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
+ main_loss = loss_fct(upsampled_logits, labels)
+ loss = main_loss
+ if auxiliary_logits is not None:
+ auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
+ loss += self.config.auxiliary_loss_weight * auxiliary_loss
+
+ return loss
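+
+ # Worked example: with config.auxiliary_loss_weight=0.4 the total loss is
+ # main_loss + 0.4 * auxiliary_loss; both terms are cross-entropy over logits upsampled to the label
+ # resolution, and pixels labeled with semantic_loss_ignore_index are ignored.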
+
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, SemanticSegmenterOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
+ Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, BeitForSemanticSegmentation
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
+ >>> model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> # logits are of shape (batch_size, num_labels, height, width)
+ >>> logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ outputs = self.beit(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=True, # we need the intermediate hidden states
+ return_dict=return_dict,
+ )
+
+ encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ # only keep certain features, and reshape
+ # note that we do +1 as the encoder_hidden_states also includes the initial embeddings
+ features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
+ batch_size = pixel_values.shape[0]
+ patch_resolution = self.config.image_size // self.config.patch_size
+ features = [
+ x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features
+ ]
+
+ # apply FPNs
+ ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
+ for i in range(len(features)):
+ features[i] = ops[i](features[i])
+
+ logits = self.decode_head(features)
+
+ auxiliary_logits = None
+ if self.auxiliary_head is not None:
+ auxiliary_logits = self.auxiliary_head(features)
+
+ loss = None
+ if labels is not None:
+ if self.config.num_labels == 1:
+ raise ValueError("The number of labels should be greater than one")
+ else:
+ loss = self.compute_loss(logits, auxiliary_logits, labels)
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (logits,) + outputs[1:]
+ else:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SemanticSegmenterOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ BEiT backbone, to be used with frameworks like DETR and MaskFormer.
+ """,
+ BEIT_START_DOCSTRING,
+)
+class BeitBackbone(BeitPreTrainedModel, BackboneMixin):
+ def __init__(self, config):
+ super().__init__(config)
+ super()._init_backbone(config)
+
+ self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
+ self.embeddings = BeitEmbeddings(config)
+ self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
+
+ if config.add_fpn:
+ if len(self.config.out_indices) != 4:
+ raise ValueError(
+ "BeitBackbone requires config.out_indices to be a list of 4 integers, "
+ "specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of "
+ "a base-sized architecture."
+ )
+ hidden_size = config.hidden_size
+ self.fpn1 = nn.Sequential(
+ nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
+ nn.BatchNorm2d(hidden_size, eps=config.batch_norm_eps),
+ nn.GELU(),
+ nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2),
+ )
+
+ self.fpn2 = nn.Sequential(nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2))
+ self.fpn3 = nn.Identity()
+ self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
+
+ # initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Tensor,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
+ >>> model = AutoBackbone.from_pretrained(
+ ... "microsoft/beit-base-patch16-224", out_features=["stage1", "stage2", "stage3", "stage4"]
+ ... )
+
+ >>> inputs = processor(image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> feature_maps = outputs.feature_maps
+ >>> list(feature_maps[-1].shape)
+ [1, 768, 14, 14]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ batch_size = pixel_values.shape[0]
+ embedding_output, (patch_height, patch_width) = self.embeddings(pixel_values)
+
+ outputs = self.encoder(
+ embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
+ )
+
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ feature_maps = ()
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
+ if stage in self.out_features:
+ if self.config.reshape_hidden_states:
+ hidden_state = hidden_state[:, 1:, :]
+ hidden_state = hidden_state.permute(0, 2, 1)
+ hidden_state = hidden_state.reshape(batch_size, -1, patch_height, patch_width)
+
+ feature_maps += (hidden_state,)
+
+ if self.config.add_fpn:
+ feature_maps = [
+ self.fpn1(feature_maps[0]),
+ self.fpn2(feature_maps[1]),
+ self.fpn3(feature_maps[2]),
+ self.fpn4(feature_maps[3]),
+ ]
+ feature_maps = tuple(feature_maps)
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (feature_maps,) + outputs[1:]
+ else:
+ output = (feature_maps,) + outputs[2:]
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1da64d263a26678a5514e76a17e05c44352eee3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/beit/modeling_flax_beit.py
@@ -0,0 +1,948 @@
+# coding=utf-8
+# Copyright 2021 Microsoft Research and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import Callable, List, Optional, Tuple
+
+import flax
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxBaseModelOutputWithPooling,
+ FlaxMaskedLMOutput,
+ FlaxSequenceClassifierOutput,
+)
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward
+from .configuration_beit import BeitConfig
+
+
+@flax.struct.dataclass
+class FlaxBeitModelOutputWithPooling(FlaxBaseModelOutputWithPooling):
+ """
+ Class for outputs of [`FlaxBeitModel`].
+
+ Args:
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
+ Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
+ *config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
+ will be returned.
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
+ the initial embedding outputs.
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+
+BEIT_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
+
+ This model is also a
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
+ behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`BeitConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified, all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
+BEIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`AutoImageProcessor.__call__`] for details.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def relative_position_index_init(window_size: Tuple[int, int]) -> jnp.ndarray:
+ """
+ Get the pair-wise relative position index for each token inside the window.
+ """
+ num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
+
+ coords_h = np.arange(window_size[0])
+ coords_w = np.arange(window_size[1])
+ coords = np.stack(np.meshgrid(coords_h, coords_w, indexing="ij")) # 2, Wh, Ww
+ coords_flatten = np.reshape(coords, (2, -1))
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
+ relative_coords = np.transpose(relative_coords, (1, 2, 0)) # Wh*Ww, Wh*Ww, 2
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
+ relative_coords[:, :, 1] += window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
+
+ relative_position_index = np.zeros(shape=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
+ relative_position_index[0, 0:] = num_relative_distance - 3
+ relative_position_index[0:, 0] = num_relative_distance - 2
+ relative_position_index[0, 0] = num_relative_distance - 1
+ return jnp.array(relative_position_index)
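+
+# A small worked example of the index above for a hypothetical 2x2 window
+# (num_relative_distance = 3 * 3 + 3 = 12), kept as comments so importing the
+# module stays side-effect free:
+#
+#   index = relative_position_index_init((2, 2))
+#   index.shape # (5, 5): 2*2 patch tokens plus the [CLS] token
+#   int(index[0, 0]) # 11 -> cls-to-cls uses the last table entry
+#   int(index[0, 1]) # 9 -> cls-to-token entries share num_relative_distance - 3
+#   int(index[1, 0]) # 10 -> token-to-cls entries share num_relative_distance - 2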
+
+
+def ones_with_scale(key, shape, scale, dtype=jnp.float32):
+ return jnp.ones(shape, dtype) * scale
+
+
+class FlaxBeitDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ rate: float
+
+ @nn.module.compact
+ def __call__(self, inputs, deterministic: Optional[bool] = True):
+ if self.rate == 0.0:
+ return inputs
+ keep_prob = 1.0 - self.rate
+ if deterministic:
+ return inputs
+ else:
+ shape = (inputs.shape[0],) + (1,) * (inputs.ndim - 1) # works for tensors of any rank, not just 2D ConvNets
+ rng = self.make_rng("droppath")
+ random_tensor = keep_prob + jax.random.uniform(rng, shape=shape, dtype=inputs.dtype)
+ binary_tensor = jnp.floor(random_tensor)
+ output = inputs / keep_prob * binary_tensor
+ return output
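+
+# A minimal, commented-out sketch of how the "droppath" RNG stream is consumed
+# (the stream name matches the one wired up in `init_weights` further below):
+#
+#   drop_path = FlaxBeitDropPath(rate=0.1)
+#   x = jnp.ones((2, 4, 8))
+#   variables = drop_path.init(jax.random.PRNGKey(0), x) # deterministic by default -> identity, no params
+#   y = drop_path.apply(variables, x, deterministic=False, rngs={"droppath": jax.random.PRNGKey(1)})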
+
+
+class FlaxBeitPatchEmbeddings(nn.Module):
+ config: BeitConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.num_channels = self.config.num_channels
+ image_size = self.config.image_size
+ patch_size = self.config.patch_size
+ num_patches = (image_size // patch_size) * (image_size // patch_size)
+ patch_shape = (image_size // patch_size, image_size // patch_size)
+ self.num_patches = num_patches
+ self.patch_shape = patch_shape
+ self.projection = nn.Conv(
+ self.config.hidden_size,
+ kernel_size=(patch_size, patch_size),
+ strides=(patch_size, patch_size),
+ padding="VALID",
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+
+ def __call__(self, pixel_values):
+ num_channels = pixel_values.shape[-1]
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ embeddings = self.projection(pixel_values)
+ batch_size, _, _, channels = embeddings.shape
+ return jnp.reshape(embeddings, (batch_size, -1, channels))
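+
+# Shape sketch for the patch embedding above (note the channels-last input
+# expected by `nn.Conv`); with the default 224x224 images and 16x16 patches the
+# 14x14 grid yields 196 patch embeddings:
+#
+#   embeddings = FlaxBeitPatchEmbeddings(BeitConfig())
+#   variables = embeddings.init(jax.random.PRNGKey(0), jnp.ones((1, 224, 224, 3)))
+#   embeddings.apply(variables, jnp.ones((1, 224, 224, 3))).shape # (1, 196, 768)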
+
+
+class FlaxBeitEmbeddings(nn.Module):
+ """Construct the CLS token, position and patch embeddings."""
+
+ config: BeitConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.cls_token = self.param("cls_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
+ if self.config.use_mask_token:
+ self.mask_token = self.param("mask_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
+ self.patch_embeddings = FlaxBeitPatchEmbeddings(self.config, dtype=self.dtype)
+ num_patches = self.patch_embeddings.num_patches
+ if self.config.use_absolute_position_embeddings:
+ self.position_embeddings = self.param(
+ "position_embeddings", nn.initializers.zeros, (1, num_patches + 1, self.config.hidden_size)
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, pixel_values, bool_masked_pos=None, deterministic=True):
+ embeddings = self.patch_embeddings(pixel_values)
+ batch_size, seq_len, _ = embeddings.shape
+
+ cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size))
+ cls_tokens = cls_tokens.astype(embeddings.dtype)
+
+ if bool_masked_pos is not None:
+ mask_tokens = jnp.broadcast_to(self.mask_token, (batch_size, seq_len, self.config.hidden_size))
+ mask_tokens = mask_tokens.astype(embeddings.dtype)
+ # replace the masked visual tokens by mask_tokens
+ w = jnp.expand_dims(bool_masked_pos, axis=-1)
+ embeddings = embeddings * (1 - w) + mask_tokens * w
+
+ embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1)
+
+ if self.config.use_absolute_position_embeddings:
+ embeddings = embeddings + self.position_embeddings.astype(embeddings.dtype)
+
+ embeddings = self.dropout(embeddings, deterministic=deterministic)
+ return embeddings
+
+
+class FlaxBeitRelativePositionBias(nn.Module):
+ config: BeitConfig
+ window_size: Tuple[int, int]
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ num_relative_distance = (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) + 3
+ self.relative_position_bias_table = self.param(
+ "relative_position_bias_table",
+ nn.initializers.zeros,
+ (num_relative_distance, self.config.num_attention_heads),
+ ) # 2*Wh-1 * 2*Ww-1, nH
+ # cls to token, token to cls, and cls to cls
+
+ self.relative_position_index = relative_position_index_init(self.window_size)
+
+ def __call__(self):
+ index = self.relative_position_index.reshape(-1)
+ shape = (self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1)
+ relative_position_bias = self.relative_position_bias_table[index].reshape(shape) # Wh*Ww,Wh*Ww,nH
+ return jnp.transpose(relative_position_bias, (2, 0, 1))
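+
+# The gathered bias above has shape (num_attention_heads, Wh*Ww + 1, Wh*Ww + 1),
+# e.g. (12, 197, 197) for the default base configuration with a 14x14 patch grid;
+# a leading batch axis is added before it is summed into the attention logits below.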
+
+
+class FlaxBeitSelfAttention(nn.Module):
+ config: BeitConfig
+ window_size: Tuple[int, int]
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ if self.config.hidden_size % self.config.num_attention_heads != 0 and not hasattr(
+ self.config, "embedding_size"
+ ):
+ raise ValueError(
+ f"The hidden size {self.config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {self.config.num_attention_heads}."
+ )
+
+ self.query = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+ self.key = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ use_bias=False,
+ )
+ self.value = nn.Dense(
+ self.config.hidden_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+
+ self.relative_position_bias = (
+ FlaxBeitRelativePositionBias(self.config, window_size=self.window_size, dtype=self.dtype)
+ if self.window_size
+ else None
+ )
+
+ def __call__(
+ self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
+ ):
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
+
+ query_states = self.query(hidden_states).reshape(
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
+ )
+ value_states = self.value(hidden_states).reshape(
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
+ )
+ key_states = self.key(hidden_states).reshape(
+ hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
+ )
+
+ dropout_rng = None
+ if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ attention_bias = jnp.array(0.0, dtype=self.dtype)
+ # Add relative position bias if present.
+ if self.relative_position_bias is not None:
+ attention_bias = jnp.expand_dims(self.relative_position_bias(), 0)
+ attention_bias = attention_bias.astype(query_states.dtype)
+
+ # Add shared relative position bias if provided.
+ if relative_position_bias is not None:
+ attention_bias = attention_bias + relative_position_bias.astype(attention_bias.dtype)
+
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.config.attention_probs_dropout_prob,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+ attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
+
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
+ return outputs
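+
+# Shape sketch for the attention above: query/key/value are reshaped to
+# (batch, seq_len, num_heads, head_dim), `dot_product_attention_weights` returns
+# weights of shape (batch, num_heads, seq_len, seq_len) with the relative position
+# bias added via `bias`, and the einsum output is flattened back to
+# (batch, seq_len, hidden_size).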
+
+
+class FlaxBeitSelfOutput(nn.Module):
+ config: BeitConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, hidden_states, deterministic: bool = True):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxBeitAttention(nn.Module):
+ config: BeitConfig
+ window_size: Tuple[int, int]
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.attention = FlaxBeitSelfAttention(self.config, self.window_size, dtype=self.dtype)
+ self.output = FlaxBeitSelfOutput(self.config, dtype=self.dtype)
+
+ def __call__(
+ self, hidden_states, relative_position_bias=None, deterministic=True, output_attentions: bool = False
+ ):
+ attn_outputs = self.attention(
+ hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
+ )
+ attn_output = attn_outputs[0]
+ attn_output = self.output(attn_output, deterministic=deterministic)
+
+ outputs = (attn_output,)
+
+ if output_attentions:
+ outputs += (attn_outputs[1],)
+
+ return outputs
+
+
+class FlaxBeitIntermediate(nn.Module):
+ config: BeitConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.intermediate_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.activation = ACT2FN[self.config.hidden_act]
+
+ def __call__(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ return hidden_states
+
+
+class FlaxBeitOutput(nn.Module):
+ config: BeitConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+ self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
+
+ def __call__(self, hidden_states, deterministic: bool = True):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+
+ return hidden_states
+
+
+class FlaxBeitLayer(nn.Module):
+ config: BeitConfig
+ window_size: Tuple[int, int]
+ drop_path_rate: float
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.attention = FlaxBeitAttention(self.config, self.window_size, dtype=self.dtype)
+ self.intermediate = FlaxBeitIntermediate(self.config, dtype=self.dtype)
+ self.output = FlaxBeitOutput(self.config, dtype=self.dtype)
+ self.layernorm_before = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.drop_path = FlaxBeitDropPath(rate=self.drop_path_rate)
+ self.layernorm_after = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+
+ self.init_values = self.config.layer_scale_init_value
+ if self.init_values > 0:
+ self.lambda_1 = self.param("lambda_1", ones_with_scale, (self.config.hidden_size,), self.init_values)
+ self.lambda_2 = self.param("lambda_2", ones_with_scale, (self.config.hidden_size,), self.init_values)
+ else:
+ self.lambda_1 = None
+ self.lambda_2 = None
+
+ def __call__(
+ self, hidden_states, relative_position_bias=None, deterministic: bool = True, output_attentions: bool = False
+ ):
+ self_attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), # in BEiT, layernorm is applied before self-attention
+ relative_position_bias,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # apply lambda_1 if present
+ if self.lambda_1 is not None:
+ attention_output = self.lambda_1.astype(attention_output.dtype) * attention_output
+
+ # first residual connection
+ hidden_states = self.drop_path(attention_output, deterministic=deterministic) + hidden_states
+
+ # in BEiT, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(hidden_states)
+
+ layer_output = self.intermediate(layer_output)
+ layer_output = self.output(layer_output, deterministic=deterministic)
+
+ # apply lambda_2 if present
+ if self.lambda_2 is not None:
+ layer_output = self.lambda_2.astype(layer_output.dtype) * layer_output
+
+ # second residual connection
+ layer_output = self.drop_path(layer_output, deterministic=deterministic) + hidden_states
+
+ outputs = (layer_output,)
+
+ if output_attentions:
+ outputs += (self_attention_outputs[1],)
+
+ return outputs
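+
+# In symbols, the pre-layernorm block above computes (the LayerScale factors
+# lambda_1 / lambda_2 are only present when `layer_scale_init_value > 0`):
+#
+#   x = x + DropPath(lambda_1 * Attention(LayerNorm(x)))
+#   x = x + DropPath(lambda_2 * MLP(LayerNorm(x)))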
+
+
+class FlaxBeitLayerCollection(nn.Module):
+ config: BeitConfig
+ window_size: Tuple[int, int]
+ drop_path_rates: List[float]
+ relative_position_bias: Callable[[], jnp.ndarray]
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxBeitLayer(
+ self.config,
+ window_size=self.window_size if self.config.use_relative_position_bias else None,
+ drop_path_rate=self.drop_path_rates[i],
+ name=str(i),
+ dtype=self.dtype,
+ )
+ for i in range(self.config.num_hidden_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ relative_position_bias = self.relative_position_bias() if self.relative_position_bias is not None else None
+ layer_outputs = layer(
+ hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions += (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = (hidden_states,)
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class FlaxBeitEncoder(nn.Module):
+ config: BeitConfig
+ window_size: Tuple[int, int]
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ if self.config.use_shared_relative_position_bias:
+ self.relative_position_bias = FlaxBeitRelativePositionBias(
+ config=self.config, window_size=self.window_size, dtype=self.dtype
+ )
+
+ # stochastic depth decay rule
+ drop_path_rates = list(np.linspace(0, self.config.drop_path_rate, self.config.num_hidden_layers))
+ self.layer = FlaxBeitLayerCollection(
+ self.config,
+ window_size=self.window_size,
+ drop_path_rates=drop_path_rates,
+ relative_position_bias=self.relative_position_bias
+ if self.config.use_shared_relative_position_bias
+ else None,
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ hidden_states,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return self.layer(
+ hidden_states,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+class FlaxBeitPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BeitConfig
+ base_model_prefix = "beit"
+ main_input_name = "pixel_values"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: BeitConfig,
+ input_shape=None,
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ if input_shape is None:
+ input_shape = (1, config.image_size, config.image_size, config.num_channels)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ pixel_values = jnp.zeros(input_shape, dtype=self.dtype)
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ dropout_rng, droppath_rng = jax.random.split(dropout_rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng, "droppath": droppath_rng}
+
+ random_params = self.module.init(rngs, pixel_values, return_dict=False)["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def __call__(
+ self,
+ pixel_values,
+ bool_masked_pos=None,
+ params: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ dropout_rng, droppath_rng = jax.random.split(dropout_rng)
+ rngs["dropout"] = dropout_rng
+ rngs["droppath"] = droppath_rng
+
+ return self.module.apply(
+ {"params": params or self.params},
+ jnp.array(pixel_values, dtype=jnp.float32),
+ bool_masked_pos,
+ not train,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ rngs=rngs,
+ )
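+
+# Note on the transpose above: `__call__` accepts channels-first pixel values of
+# shape (batch_size, num_channels, height, width), as documented in
+# BEIT_INPUTS_DOCSTRING, and converts them to the channels-last layout expected by
+# the Flax convolution in the patch embeddings. When a `dropout_rng` is supplied,
+# it is split so that dropout and stochastic depth ("droppath") draw from
+# independent streams.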
+
+
+class FlaxBeitPooler(nn.Module):
+ config: BeitConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ if self.config.use_mean_pooling:
+ self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+
+ def __call__(self, hidden_states):
+ if self.config.use_mean_pooling:
+ # Mean pool the final hidden states of the patch tokens
+ patch_tokens = hidden_states[:, 1:, :]
+ pooled_output = self.layernorm(jnp.mean(patch_tokens, axis=1))
+ else:
+ # Pool by simply taking the final hidden state of the [CLS] token
+ pooled_output = hidden_states[:, 0]
+
+ return pooled_output
+
+
+class FlaxBeitModule(nn.Module):
+ config: BeitConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ add_pooling_layer: bool = True
+
+ def setup(self):
+ self.embeddings = FlaxBeitEmbeddings(self.config, dtype=self.dtype)
+ self.encoder = FlaxBeitEncoder(
+ self.config, window_size=self.embeddings.patch_embeddings.patch_shape, dtype=self.dtype
+ )
+ if not self.config.use_mean_pooling:
+ self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.pooler = FlaxBeitPooler(self.config, dtype=self.dtype) if self.add_pooling_layer else None
+
+ def __call__(
+ self,
+ pixel_values,
+ bool_masked_pos=None,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ hidden_states = self.embeddings(pixel_values, bool_masked_pos, deterministic=deterministic)
+
+ outputs = self.encoder(
+ hidden_states,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = outputs[0]
+ if not self.config.use_mean_pooling:
+ hidden_states = self.layernorm(hidden_states)
+ pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
+
+ if not return_dict:
+ # if pooled is None, don't return it
+ if pooled is None:
+ return (hidden_states,) + outputs[1:]
+ return (hidden_states, pooled) + outputs[1:]
+
+ return FlaxBeitModelOutputWithPooling(
+ last_hidden_state=hidden_states,
+ pooler_output=pooled,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Beit Model transformer outputting raw hidden-states without any specific head on top.",
+ BEIT_START_DOCSTRING,
+)
+class FlaxBeitModel(FlaxBeitPreTrainedModel):
+ module_class = FlaxBeitModule
+
+
+FLAX_BEIT_MODEL_DOCSTRING = """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, FlaxBeitModel
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")
+ >>> model = FlaxBeitModel.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")
+
+ >>> inputs = image_processor(images=image, return_tensors="np")
+ >>> outputs = model(**inputs)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```
+"""
+
+overwrite_call_docstring(FlaxBeitModel, FLAX_BEIT_MODEL_DOCSTRING)
+append_replace_return_docstrings(FlaxBeitModel, output_type=FlaxBeitModelOutputWithPooling, config_class=BeitConfig)
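+
+# For the example above, the base checkpoint uses 224x224 images with 16x16
+# patches, so `last_hidden_states` is expected to have shape (1, 197, 768):
+# 14 * 14 = 196 patch tokens plus one [CLS] token, each of hidden size 768.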
+
+
+class FlaxBeitForMaskedImageModelingModule(nn.Module):
+ config: BeitConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.beit = FlaxBeitModule(self.config, add_pooling_layer=False, dtype=self.dtype)
+
+ # Classifier head
+ self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
+ self.lm_head = nn.Dense(
+ self.config.vocab_size,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ pixel_values=None,
+ bool_masked_pos=None,
+ deterministic: bool = True,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.beit(
+ pixel_values,
+ bool_masked_pos,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+ prediction_scores = self.lm_head(sequence_output[:, 1:])
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return output
+
+ return FlaxMaskedLMOutput(
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "Beit Model transformer with a 'language' modeling head on top (to predict visual tokens).",
+ BEIT_START_DOCSTRING,
+)
+class FlaxBeitForMaskedImageModeling(FlaxBeitPreTrainedModel):
+ module_class = FlaxBeitForMaskedImageModelingModule
+
+
+FLAX_BEIT_MLM_DOCSTRING = """
+ bool_masked_pos (`numpy.ndarray` of shape `(batch_size, num_patches)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, FlaxBeitForMaskedImageModeling
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
+ >>> model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
+
+ >>> inputs = image_processor(images=image, return_tensors="np")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+ ```
+"""
+
+overwrite_call_docstring(FlaxBeitForMaskedImageModeling, FLAX_BEIT_MLM_DOCSTRING)
+append_replace_return_docstrings(
+ FlaxBeitForMaskedImageModeling, output_type=FlaxMaskedLMOutput, config_class=BeitConfig
+)
+
+
+class FlaxBeitForImageClassificationModule(nn.Module):
+ config: BeitConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.beit = FlaxBeitModule(config=self.config, dtype=self.dtype, add_pooling_layer=True)
+ self.classifier = nn.Dense(
+ self.config.num_labels,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ pixel_values=None,
+ bool_masked_pos=None,
+ deterministic: bool = True,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.beit(
+ pixel_values,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+ logits = self.classifier(pooled_output)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return output
+
+ return FlaxSequenceClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final
+ hidden states of the patch tokens) e.g. for ImageNet.
+ """,
+ BEIT_START_DOCSTRING,
+)
+class FlaxBeitForImageClassification(FlaxBeitPreTrainedModel):
+ module_class = FlaxBeitForImageClassificationModule
+
+
+FLAX_BEIT_CLASSIF_DOCSTRING = """
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, FlaxBeitForImageClassification
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
+ >>> model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
+
+ >>> inputs = image_processor(images=image, return_tensors="np")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+ >>> # model predicts one of the 1000 ImageNet classes
+ >>> predicted_class_idx = logits.argmax(-1).item()
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
+ ```
+"""
+
+overwrite_call_docstring(FlaxBeitForImageClassification, FLAX_BEIT_CLASSIF_DOCSTRING)
+append_replace_return_docstrings(
+ FlaxBeitForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=BeitConfig
+)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcd41e68dc10b98eed91c73d7760f1c85dd2064a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0b44186efbc05bef9faed3a47057fcfe3610862
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__init__.py
@@ -0,0 +1,113 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_torch_available,
+ is_vision_available,
+)
+
+
+_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
+ _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_deit"] = [
+ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "DeiTForImageClassification",
+ "DeiTForImageClassificationWithTeacher",
+ "DeiTForMaskedImageModeling",
+ "DeiTModel",
+ "DeiTPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_deit"] = [
+ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFDeiTForImageClassification",
+ "TFDeiTForImageClassificationWithTeacher",
+ "TFDeiTForMaskedImageModeling",
+ "TFDeiTModel",
+ "TFDeiTPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_deit import DeiTFeatureExtractor
+ from .image_processing_deit import DeiTImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_deit import (
+ DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ DeiTForImageClassification,
+ DeiTForImageClassificationWithTeacher,
+ DeiTForMaskedImageModeling,
+ DeiTModel,
+ DeiTPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_deit import (
+ TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFDeiTForImageClassification,
+ TFDeiTForImageClassificationWithTeacher,
+ TFDeiTForMaskedImageModeling,
+ TFDeiTModel,
+ TFDeiTPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2e58482ed875ed99aaf390adcc090ba39b673c3
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/configuration_deit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/configuration_deit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a8ec79ad2226aacef671fe5e88a7720212f8576
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/configuration_deit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/convert_deit_timm_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/convert_deit_timm_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..efc32b6a7cecd4e2b1bedd6e24b4a9fc3f231caf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/convert_deit_timm_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/feature_extraction_deit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/feature_extraction_deit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1750457aab04801593dfe948ede0c2a840993881
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/feature_extraction_deit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/image_processing_deit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/image_processing_deit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d2223329a40c84bf81972ee894af7d5a55de4cf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/image_processing_deit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_deit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_deit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1eca3289d99554fee52607f3bb44231e455563c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_deit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_tf_deit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_tf_deit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6668e366d47f4872bd33190c829b126a247b9749
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/__pycache__/modeling_tf_deit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/configuration_deit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/configuration_deit.py
new file mode 100644
index 0000000000000000000000000000000000000000..394c6ff93704ccedad80c1a20c7f8a9aa2e5a04d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/configuration_deit.py
@@ -0,0 +1,142 @@
+# coding=utf-8
+# Copyright 2021 Facebook AI Research (FAIR) and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" DeiT model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class DeiTConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`DeiTModel`]. It is used to instantiate a DeiT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the DeiT
+ [facebook/deit-base-distilled-patch16-224](https://huggingface.co/facebook/deit-base-distilled-patch16-224)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ encoder_stride (`int`, *optional*, defaults to 16):
+ Factor to increase the spatial resolution by in the decoder head for masked image modeling.
+
+ Example:
+
+ ```python
+ >>> from transformers import DeiTConfig, DeiTModel
+
+ >>> # Initializing a DeiT deit-base-distilled-patch16-224 style configuration
+ >>> configuration = DeiTConfig()
+
+ >>> # Initializing a model (with random weights) from the deit-base-distilled-patch16-224 style configuration
+ >>> model = DeiTModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "deit"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ qkv_bias=True,
+ encoder_stride=16,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.qkv_bias = qkv_bias
+ self.encoder_stride = encoder_stride
+
+
+class DeiTOnnxConfig(OnnxConfig):
+ torch_onnx_minimum_version = version.parse("1.11")
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
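+
+
+# A commented-out sketch of how the ONNX config above exposes its metadata (the
+# values simply mirror the properties defined on `DeiTOnnxConfig`):
+#
+#   onnx_config = DeiTOnnxConfig(DeiTConfig())
+#   onnx_config.inputs # OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])
+#   onnx_config.atol_for_validation # 1e-4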
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/convert_deit_timm_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/convert_deit_timm_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b5c795ff2d2ab6d8b3e6ce6f8a0150ff3911f33
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/convert_deit_timm_to_pytorch.py
@@ -0,0 +1,219 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert DeiT distilled checkpoints from the timm library."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import timm
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config, base_model=False):
+ rename_keys = []
+ for i in range(config.num_hidden_layers):
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
+ rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
+ rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
+ rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
+ rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
+ rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
+ rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
+ rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
+ rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
+ rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))
+
+ # projection layer + position embeddings
+ rename_keys.extend(
+ [
+ ("cls_token", "deit.embeddings.cls_token"),
+ ("dist_token", "deit.embeddings.distillation_token"),
+ ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
+ ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
+ ("pos_embed", "deit.embeddings.position_embeddings"),
+ ]
+ )
+
+ if base_model:
+ # layernorm + pooler
+ rename_keys.extend(
+ [
+ ("norm.weight", "layernorm.weight"),
+ ("norm.bias", "layernorm.bias"),
+ ("pre_logits.fc.weight", "pooler.dense.weight"),
+ ("pre_logits.fc.bias", "pooler.dense.bias"),
+ ]
+ )
+
+ # if just the base model, we should remove "deit" from all keys that start with "deit"
+ rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
+ else:
+ # layernorm + classification heads
+ rename_keys.extend(
+ [
+ ("norm.weight", "deit.layernorm.weight"),
+ ("norm.bias", "deit.layernorm.bias"),
+ ("head.weight", "cls_classifier.weight"),
+ ("head.bias", "cls_classifier.bias"),
+ ("head_dist.weight", "distillation_classifier.weight"),
+ ("head_dist.bias", "distillation_classifier.bias"),
+ ]
+ )
+
+ return rename_keys
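+
+# For instance, with the default config and `base_model=False`, the first entry
+# of the returned list is
+# ("blocks.0.norm1.weight", "deit.encoder.layer.0.layernorm_before.weight"),
+# and the two timm heads map to `cls_classifier` / `distillation_classifier`.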
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config, base_model=False):
+ for i in range(config.num_hidden_layers):
+ if base_model:
+ prefix = ""
+ else:
+ prefix = "deit."
+ # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
+ in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
+ in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+ : config.hidden_size, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+ config.hidden_size : config.hidden_size * 2, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
+ config.hidden_size : config.hidden_size * 2
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+ -config.hidden_size :, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
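+
+# In other words, timm stores the attention projection as a single fused
+# (3 * hidden_size, hidden_size) matrix, and the slices above recover the three
+# (hidden_size, hidden_size) blocks in query/key/value order, e.g. for
+# hidden_size = 768:
+#
+#   query.weight = qkv.weight[:768, :]
+#   key.weight = qkv.weight[768:1536, :]
+#   value.weight = qkv.weight[-768:, :]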
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+@torch.no_grad()
+def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
+ """
+ Copy/paste/tweak model's weights to our DeiT structure.
+ """
+
+ # define default DeiT configuration
+ config = DeiTConfig()
+ # all deit models have fine-tuned heads
+ base_model = False
+ # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
+ config.num_labels = 1000
+ repo_id = "huggingface/label-files"
+ filename = "imagenet-1k-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ config.patch_size = int(deit_name[-6:-4])
+ config.image_size = int(deit_name[-3:])
+ # size of the architecture
+ if deit_name[9:].startswith("tiny"):
+ config.hidden_size = 192
+ config.intermediate_size = 768
+ config.num_hidden_layers = 12
+ config.num_attention_heads = 3
+ elif deit_name[9:].startswith("small"):
+ config.hidden_size = 384
+ config.intermediate_size = 1536
+ config.num_hidden_layers = 12
+ config.num_attention_heads = 6
+ if deit_name[9:].startswith("base"):
+ pass
+ elif deit_name[4:].startswith("large"):
+ config.hidden_size = 1024
+ config.intermediate_size = 4096
+ config.num_hidden_layers = 24
+ config.num_attention_heads = 16
+
+ # load original model from timm
+ timm_model = timm.create_model(deit_name, pretrained=True)
+ timm_model.eval()
+
+ # load state_dict of original model, remove and rename some keys
+ state_dict = timm_model.state_dict()
+ rename_keys = create_rename_keys(config, base_model)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_q_k_v(state_dict, config, base_model)
+
+ # load HuggingFace model
+ model = DeiTForImageClassificationWithTeacher(config).eval()
+ model.load_state_dict(state_dict)
+
+ # Check outputs on an image, prepared by DeiTImageProcessor
+ size = int(
+ (256 / 224) * config.image_size
+ ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
+ image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
+ pixel_values = encoding["pixel_values"]
+ outputs = model(pixel_values)
+
+ timm_logits = timm_model(pixel_values)
+ assert timm_logits.shape == outputs.logits.shape
+ assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--deit_name",
+ default="vit_deit_base_distilled_patch16_224",
+ type=str,
+ help="Name of the DeiT timm model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+
+ args = parser.parse_args()
+ convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
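+
+# Example invocation (the dump folder below is a placeholder path):
+#
+#   python convert_deit_timm_to_pytorch.py \
+#       --deit_name vit_deit_base_distilled_patch16_224 \
+#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224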
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/feature_extraction_deit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/feature_extraction_deit.py
new file mode 100644
index 0000000000000000000000000000000000000000..b66922ea95753a81b93a3f9c99607119017df3f3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/feature_extraction_deit.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for DeiT."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_deit import DeiTImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class DeiTFeatureExtractor(DeiTImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+ " use DeiTImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/image_processing_deit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/image_processing_deit.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a8ebb36377854aa80bf8505c7e98b1eb661648a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/image_processing_deit.py
@@ -0,0 +1,320 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for DeiT."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import resize, to_channel_dimension_format
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+class DeiTImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a DeiT image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+ `do_resize` in `preprocess`.
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
+ Size of the image after `resize`. Can be overridden by `size` in `preprocess`.
+ resample (`PILImageResampling` filter, *optional*, defaults to `Resampling.BICUBIC`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
+ is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or a list of floats, one value per image channel.
+ Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or a list of floats, one value per
+ image channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PIL.Image.BICUBIC,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_rescale: bool = True,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 256, "width": 256}
+ size = get_size_dict(size)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_center_crop",
+ "crop_size",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ size = get_size_dict(size)
+ if "height" not in size or "width" not in size:
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
+ output_size = (size["height"], size["width"])
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
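+ # Illustrative sketch (added comment, not part of the original file): `resize` can be
+ # called directly on a numpy array; when `input_data_format` is not given, the channel
+ # dimension format is inferred from the array itself.
+ #
+ #   import numpy as np
+ #   processor = DeiTImageProcessor()
+ #   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
+ #   resized = processor.resize(image, size={"height": 256, "width": 256})
+ #   # resized.shape == (256, 256, 3)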
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample=None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after `resize`.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+ `PILImageResampling` filter to use if resizing the image. Only has an effect if `do_resize` is set to
+ `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
+ padded with zeros and then cropped.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values to the range [0, 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - `None`: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size)
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_center_crop:
+ images = [
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
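+
+ # Minimal usage sketch (added comment, not part of the original file), assuming PIL is
+ # available and "cat.jpg" is a hypothetical local RGB image. With the defaults, the image
+ # is resized to 256x256, center cropped to 224x224, rescaled to [0, 1] and normalized with
+ # the ImageNet standard mean/std:
+ #
+ #   from PIL import Image
+ #   processor = DeiTImageProcessor()
+ #   image = Image.open("cat.jpg").convert("RGB")
+ #   batch = processor.preprocess(image, return_tensors="np")
+ #   # batch["pixel_values"].shape == (1, 3, 224, 224)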
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_deit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_deit.py
new file mode 100644
index 0000000000000000000000000000000000000000..5efcc95d503da407426611b2b67d5c76ddde73e8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_deit.py
@@ -0,0 +1,891 @@
+# coding=utf-8
+# Copyright 2021 Facebook AI Research (FAIR), Ross Wightman, The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch DeiT model."""
+
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import Optional, Set, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPooling,
+ ImageClassifierOutput,
+ MaskedImageModelingOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_deit import DeiTConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "DeiTConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/deit-base-distilled-patch16-224"
+_EXPECTED_OUTPUT_SHAPE = [1, 198, 768]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "facebook/deit-base-distilled-patch16-224"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+
+from ..deprecated._archive_maps import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class DeiTEmbeddings(nn.Module):
+ """
+ Construct the CLS token, distillation token, position and patch embeddings. Optionally, also the mask token.
+ """
+
+ def __init__(self, config: DeiTConfig, use_mask_token: bool = False) -> None:
+ super().__init__()
+
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
+ self.patch_embeddings = DeiTPatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size))
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None) -> torch.Tensor:
+ embeddings = self.patch_embeddings(pixel_values)
+ batch_size, seq_length, _ = embeddings.size()
+
+ if bool_masked_pos is not None:
+ mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
+ # replace the masked visual tokens by mask_tokens
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
+
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+ distillation_tokens = self.distillation_token.expand(batch_size, -1, -1)
+ embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1)
+ embeddings = embeddings + self.position_embeddings
+ embeddings = self.dropout(embeddings)
+ return embeddings
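+ # Shape note (added comment): with the base configuration (image_size=224, patch_size=16,
+ # hidden_size=768) there are 14 * 14 = 196 patches, so the returned embeddings have shape
+ # (batch_size, 196 + 2, 768) = (batch_size, 198, 768), where index 0 is the CLS token and
+ # index 1 is the distillation token.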
+
+
+class DeiTPatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ if height != self.image_size[0] or width != self.image_size[1]:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
+ )
+ x = self.projection(pixel_values).flatten(2).transpose(1, 2)
+ return x
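+ # Shape note (added comment): for a (1, 3, 224, 224) input and patch_size=16, the Conv2d
+ # projection yields (1, 768, 14, 14), which `flatten(2).transpose(1, 2)` turns into the
+ # (1, 196, 768) patch embeddings consumed by the encoder.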
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->DeiT
+class DeiTSelfAttention(nn.Module):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
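+ # Summary (added comment): per head, attention_probs = softmax(Q @ K^T / sqrt(attention_head_size))
+ # and the context is attention_probs @ V; the per-head contexts are then concatenated back into a
+ # (batch_size, seq_length, all_head_size) tensor.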
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->DeiT
+class DeiTSelfOutput(nn.Module):
+ """
+ The residual connection is defined in DeiTLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->DeiT
+class DeiTAttention(nn.Module):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__()
+ self.attention = DeiTSelfAttention(config)
+ self.output = DeiTSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads: Set[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->DeiT
+class DeiTIntermediate(nn.Module):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->DeiT
+class DeiTOutput(nn.Module):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ hidden_states = hidden_states + input_tensor
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->DeiT
+class DeiTLayer(nn.Module):
+ """This corresponds to the Block class in the timm implementation."""
+
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = DeiTAttention(config)
+ self.intermediate = DeiTIntermediate(config)
+ self.output = DeiTOutput(config)
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), # in DeiT, layernorm is applied before self-attention
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection
+ hidden_states = attention_output + hidden_states
+
+ # in DeiT, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+
+ # second residual connection is done here
+ layer_output = self.output(layer_output, hidden_states)
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->DeiT
+class DeiTEncoder(nn.Module):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([DeiTLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class DeiTPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DeiTConfig
+ base_model_prefix = "deit"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["DeiTLayer"]
+
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Upcast the input to `fp32` and cast it back to the desired `dtype` to work around
+ # `trunc_normal_cpu` not being implemented for `half`
+ module.weight.data = nn.init.trunc_normal_(
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
+ ).to(module.weight.dtype)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+DEIT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`DeiTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DEIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`DeiTImageProcessor.__call__`] for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DeiT Model transformer outputting raw hidden-states without any specific head on top.",
+ DEIT_START_DOCSTRING,
+)
+class DeiTModel(DeiTPreTrainedModel):
+ def __init__(self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False) -> None:
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = DeiTEmbeddings(config, use_mask_token=use_mask_token)
+ self.encoder = DeiTEncoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.pooler = DeiTPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> DeiTPatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. `heads_to_prune`: a dict of {layer_num: list of heads to prune in this
+ layer}. See the base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ # TODO: maybe have a cleaner way to cast the input (from `ImageProcessor` side?)
+ expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
+ if pixel_values.dtype != expected_dtype:
+ pixel_values = pixel_values.to(expected_dtype)
+
+ embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
+ return head_outputs + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
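+ # Minimal usage sketch (added comment, not part of the original file), assuming the public
+ # "facebook/deit-base-distilled-patch16-224" checkpoint and a PIL image `image`:
+ #
+ #   from transformers import AutoImageProcessor, DeiTModel
+ #   processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
+ #   model = DeiTModel.from_pretrained("facebook/deit-base-distilled-patch16-224")
+ #   outputs = model(**processor(images=image, return_tensors="pt"))
+ #   # outputs.last_hidden_state.shape == torch.Size([1, 198, 768])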
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTPooler with ViT->DeiT
+class DeiTPooler(nn.Module):
+ def __init__(self, config: DeiTConfig):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+@add_start_docstrings(
+ """DeiT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886).
+
+
+
+ Note that we provide a script to pre-train this model on custom data in our [examples
+ directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
+
+
+ """,
+ DEIT_START_DOCSTRING,
+)
+class DeiTForMaskedImageModeling(DeiTPreTrainedModel):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__(config)
+
+ self.deit = DeiTModel(config, add_pooling_layer=False, use_mask_token=True)
+
+ self.decoder = nn.Sequential(
+ nn.Conv2d(
+ in_channels=config.hidden_size,
+ out_channels=config.encoder_stride**2 * config.num_channels,
+ kernel_size=1,
+ ),
+ nn.PixelShuffle(config.encoder_stride),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=MaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, MaskedImageModelingOutput]:
+ r"""
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+
+ Returns:
+
+ Examples:
+ ```python
+ >>> from transformers import AutoImageProcessor, DeiTForMaskedImageModeling
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
+ >>> model = DeiTForMaskedImageModeling.from_pretrained("facebook/deit-base-distilled-patch16-224")
+
+ >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
+ >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
+ >>> # create random boolean mask of shape (batch_size, num_patches)
+ >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
+
+ >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
+ >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
+ >>> list(reconstructed_pixel_values.shape)
+ [1, 3, 224, 224]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deit(
+ pixel_values,
+ bool_masked_pos=bool_masked_pos,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ # Drop the CLS and distillation tokens and reshape to (batch_size, num_channels, height, width)
+ sequence_output = sequence_output[:, 1:-1]
+ batch_size, sequence_length, num_channels = sequence_output.shape
+ height = width = int(sequence_length**0.5)
+ sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
+
+ # Reconstruct pixel values
+ reconstructed_pixel_values = self.decoder(sequence_output)
+
+ masked_im_loss = None
+ if bool_masked_pos is not None:
+ size = self.config.image_size // self.config.patch_size
+ bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
+ mask = (
+ bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
+ .repeat_interleave(self.config.patch_size, 2)
+ .unsqueeze(1)
+ .contiguous()
+ )
+ reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
+ masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
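+ # In words (added comment): the patch-level mask is upsampled to pixel resolution, the
+ # reconstruction error is an element-wise L1 loss, and the final loss averages that error
+ # over the masked pixels only (and over channels).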
+
+ if not return_dict:
+ output = (reconstructed_pixel_values,) + outputs[1:]
+ return ((masked_im_loss,) + output) if masked_im_loss is not None else output
+
+ return MaskedImageModelingOutput(
+ loss=masked_im_loss,
+ reconstruction=reconstructed_pixel_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DeiT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
+ the [CLS] token) e.g. for ImageNet.
+ """,
+ DEIT_START_DOCSTRING,
+)
+class DeiTForImageClassification(DeiTPreTrainedModel):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.deit = DeiTModel(config, add_pooling_layer=False)
+
+ # Classifier head
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, DeiTForImageClassification
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> torch.manual_seed(3) # doctest: +IGNORE_RESULT
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> # note: the checkpoint on the hub was trained as DeiTForImageClassificationWithTeacher, so the
+ >>> # classification head loaded here is randomly initialized, hence the predictions will be random
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
+ >>> model = DeiTForImageClassification.from_pretrained("facebook/deit-base-distilled-patch16-224")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+ >>> # model predicts one of the 1000 ImageNet classes
+ >>> predicted_class_idx = logits.argmax(-1).item()
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
+ Predicted class: Polaroid camera, Polaroid Land camera
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deit(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.classifier(sequence_output[:, 0, :])
+ # we don't use the distillation token
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@dataclass
+class DeiTForImageClassificationWithTeacherOutput(ModelOutput):
+ """
+ Output type of [`DeiTForImageClassificationWithTeacher`].
+
+ Args:
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores as the average of the cls_logits and distillation logits.
+ cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
+ class token).
+ distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
+ distillation token).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ logits: torch.FloatTensor = None
+ cls_logits: torch.FloatTensor = None
+ distillation_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@add_start_docstrings(
+ """
+ DeiT Model transformer with image classification heads on top (a linear layer on top of the final hidden state of
+ the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.
+
+ .. warning::
+
+ This model supports inference only. Fine-tuning with distillation (i.e. with a teacher) is not yet
+ supported.
+ """,
+ DEIT_START_DOCSTRING,
+)
+class DeiTForImageClassificationWithTeacher(DeiTPreTrainedModel):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.deit = DeiTModel(config, add_pooling_layer=False)
+
+ # Classifier heads
+ self.cls_classifier = (
+ nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+ self.distillation_classifier = (
+ nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=DeiTForImageClassificationWithTeacherOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, DeiTForImageClassificationWithTeacherOutput]:
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deit(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ cls_logits = self.cls_classifier(sequence_output[:, 0, :])
+ distillation_logits = self.distillation_classifier(sequence_output[:, 1, :])
+
+ # during inference, return the average of both classifier predictions
+ logits = (cls_logits + distillation_logits) / 2
+
+ if not return_dict:
+ output = (logits, cls_logits, distillation_logits) + outputs[1:]
+ return output
+
+ return DeiTForImageClassificationWithTeacherOutput(
+ logits=logits,
+ cls_logits=cls_logits,
+ distillation_logits=distillation_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
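+
+ # Minimal usage sketch (added comment, not part of the original file): this head matches the
+ # distilled checkpoints on the hub, so the averaged logits give meaningful predictions:
+ #
+ #   from transformers import AutoImageProcessor, DeiTForImageClassificationWithTeacher
+ #   processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
+ #   model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
+ #   outputs = model(**processor(images=image, return_tensors="pt"))  # `image` is a PIL image
+ #   print(model.config.id2label[outputs.logits.argmax(-1).item()])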
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_tf_deit.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_tf_deit.py
new file mode 100644
index 0000000000000000000000000000000000000000..aec5f6df95922aac77bd8e210b74c37cc17beebe
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deit/modeling_tf_deit.py
@@ -0,0 +1,1178 @@
+# coding=utf-8
+# Copyright 2022 Facebook AI Research (FAIR) and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TensorFlow DeiT model."""
+
+
+from __future__ import annotations
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPooling,
+ TFImageClassifierOutput,
+ TFMaskedImageModelingOutput,
+)
+from ...modeling_tf_utils import (
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_deit import DeiTConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "DeiTConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/deit-base-distilled-patch16-224"
+_EXPECTED_OUTPUT_SHAPE = [1, 198, 768]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "facebook/deit-base-distilled-patch16-224"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+
+from ..deprecated._archive_maps import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class TFDeiTForImageClassificationWithTeacherOutput(ModelOutput):
+ """
+ Output type of [`TFDeiTForImageClassificationWithTeacher`].
+
+ Args:
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores as the average of the cls_logits and distillation logits.
+ cls_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
+ class token).
+ distillation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
+ distillation token).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
+ the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ logits: tf.Tensor = None
+ cls_logits: tf.Tensor = None
+ distillation_logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+class TFDeiTEmbeddings(keras.layers.Layer):
+ """
+ Construct the CLS token, distillation token, position and patch embeddings. Optionally, also the mask token.
+ """
+
+ def __init__(self, config: DeiTConfig, use_mask_token: bool = False, **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.config = config
+ self.use_mask_token = use_mask_token
+ self.patch_embeddings = TFDeiTPatchEmbeddings(config=config, name="patch_embeddings")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
+
+ def build(self, input_shape=None):
+ self.cls_token = self.add_weight(
+ shape=(1, 1, self.config.hidden_size),
+ initializer=keras.initializers.zeros(),
+ trainable=True,
+ name="cls_token",
+ )
+ self.distillation_token = self.add_weight(
+ shape=(1, 1, self.config.hidden_size),
+ initializer=keras.initializers.zeros(),
+ trainable=True,
+ name="distillation_token",
+ )
+ self.mask_token = None
+ if self.use_mask_token:
+ self.mask_token = self.add_weight(
+ shape=(1, 1, self.config.hidden_size),
+ initializer=keras.initializers.zeros(),
+ trainable=True,
+ name="mask_token",
+ )
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = self.add_weight(
+ shape=(1, num_patches + 2, self.config.hidden_size),
+ initializer=keras.initializers.zeros(),
+ trainable=True,
+ name="position_embeddings",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "patch_embeddings", None) is not None:
+ with tf.name_scope(self.patch_embeddings.name):
+ self.patch_embeddings.build(None)
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+
+ def call(
+ self, pixel_values: tf.Tensor, bool_masked_pos: tf.Tensor | None = None, training: bool = False
+ ) -> tf.Tensor:
+ embeddings = self.patch_embeddings(pixel_values)
+ batch_size, seq_length, _ = shape_list(embeddings)
+
+ if bool_masked_pos is not None:
+ mask_tokens = tf.tile(self.mask_token, [batch_size, seq_length, 1])
+ # replace the masked visual tokens by mask_tokens
+ mask = tf.expand_dims(bool_masked_pos, axis=-1)
+ mask = tf.cast(mask, dtype=mask_tokens.dtype)
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
+
+ cls_tokens = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
+ distillation_tokens = tf.repeat(self.distillation_token, repeats=batch_size, axis=0)
+ embeddings = tf.concat((cls_tokens, distillation_tokens, embeddings), axis=1)
+ embeddings = embeddings + self.position_embeddings
+ embeddings = self.dropout(embeddings, training=training)
+ return embeddings
+
+
+class TFDeiTPatchEmbeddings(keras.layers.Layer):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config: DeiTConfig, **kwargs) -> None:
+ super().__init__(**kwargs)
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+
+ self.projection = keras.layers.Conv2D(
+ hidden_size, kernel_size=patch_size, strides=patch_size, name="projection"
+ )
+
+ def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
+ batch_size, height, width, num_channels = shape_list(pixel_values)
+ if tf.executing_eagerly() and num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ if tf.executing_eagerly() and (height != self.image_size[0] or width != self.image_size[1]):
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
+ )
+ x = self.projection(pixel_values)
+ batch_size, height, width, num_channels = shape_list(x)
+ x = tf.reshape(x, (batch_size, height * width, num_channels))
+ return x
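+ # Note (added comment): unlike the PyTorch `DeiTPatchEmbeddings`, this layer works in
+ # channels_last (NHWC) format, as the `shape_list` call above unpacks
+ # (batch_size, height, width, num_channels); the caller is expected to provide NHWC inputs.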
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "projection", None) is not None:
+ with tf.name_scope(self.projection.name):
+ self.projection.build([None, None, None, self.num_channels])
+
+
+# Copied from transformers.models.vit.modeling_tf_vit.TFViTSelfAttention with ViT->DeiT
+class TFDeiTSelfAttention(keras.layers.Layer):
+ def __init__(self, config: DeiTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
+ f"of attention heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
+
+ self.query = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
+ )
+ self.key = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
+ )
+ self.value = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
+ )
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
+ self.config = config
+
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
+
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ batch_size = shape_list(hidden_states)[0]
+ mixed_query_layer = self.query(inputs=hidden_states)
+ mixed_key_layer = self.key(inputs=hidden_states)
+ mixed_value_layer = self.value(inputs=hidden_states)
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ # (batch size, num_heads, seq_len_q, seq_len_k)
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
+ attention_scores = tf.divide(attention_scores, dk)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = tf.multiply(attention_probs, head_mask)
+
+ attention_output = tf.matmul(attention_probs, value_layer)
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
+
+ # (batch_size, seq_len_q, all_head_size)
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.config.hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.vit.modeling_tf_vit.TFViTSelfOutput with ViT->DeiT
+class TFDeiTSelfOutput(keras.layers.Layer):
+ """
+ The residual connection is defined in TFDeiTLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: DeiTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.vit.modeling_tf_vit.TFViTAttention with ViT->DeiT
+class TFDeiTAttention(keras.layers.Layer):
+ def __init__(self, config: DeiTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.self_attention = TFDeiTSelfAttention(config, name="attention")
+ self.dense_output = TFDeiTSelfOutput(config, name="output")
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(
+ self,
+ input_tensor: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ self_outputs = self.self_attention(
+ hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, training=training
+ )
+ attention_output = self.dense_output(
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
+ )
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attention", None) is not None:
+ with tf.name_scope(self.self_attention.name):
+ self.self_attention.build(None)
+ if getattr(self, "dense_output", None) is not None:
+ with tf.name_scope(self.dense_output.name):
+ self.dense_output.build(None)
+
+
+# Copied from transformers.models.vit.modeling_tf_vit.TFViTIntermediate with ViT->DeiT
+class TFDeiTIntermediate(keras.layers.Layer):
+ def __init__(self, config: DeiTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.vit.modeling_tf_vit.TFViTOutput with ViT->DeiT
+class TFDeiTOutput(keras.layers.Layer):
+ def __init__(self, config: DeiTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = hidden_states + input_tensor
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+
+
+class TFDeiTLayer(keras.layers.Layer):
+ """This corresponds to the Block class in the timm implementation."""
+
+ def __init__(self, config: DeiTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFDeiTAttention(config, name="attention")
+ self.intermediate = TFDeiTIntermediate(config, name="intermediate")
+ self.deit_output = TFDeiTOutput(config, name="output")
+
+ self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before")
+ self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ attention_outputs = self.attention(
+ # in DeiT, layernorm is applied before self-attention
+ input_tensor=self.layernorm_before(inputs=hidden_states, training=training),
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = attention_outputs[0]
+
+ # first residual connection
+ hidden_states = attention_output + hidden_states
+
+ # in DeiT, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(inputs=hidden_states, training=training)
+
+ intermediate_output = self.intermediate(hidden_states=layer_output, training=training)
+
+ # second residual connection is done here
+ layer_output = self.deit_output(
+ hidden_states=intermediate_output, input_tensor=hidden_states, training=training
+ )
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "deit_output", None) is not None:
+ with tf.name_scope(self.deit_output.name):
+ self.deit_output.build(None)
+ if getattr(self, "layernorm_before", None) is not None:
+ with tf.name_scope(self.layernorm_before.name):
+ self.layernorm_before.build([None, None, self.config.hidden_size])
+ if getattr(self, "layernorm_after", None) is not None:
+ with tf.name_scope(self.layernorm_after.name):
+ self.layernorm_after.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.vit.modeling_tf_vit.TFViTEncoder with ViT->DeiT
+class TFDeiTEncoder(keras.layers.Layer):
+ def __init__(self, config: DeiTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.layer = [TFDeiTLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ head_mask: tf.Tensor,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ head_mask=head_mask[i],
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFDeiTMainLayer(keras.layers.Layer):
+ config_class = DeiTConfig
+
+ def __init__(
+ self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs
+ ) -> None:
+ super().__init__(**kwargs)
+ self.config = config
+
+ self.embeddings = TFDeiTEmbeddings(config, use_mask_token=use_mask_token, name="embeddings")
+ self.encoder = TFDeiTEncoder(config, name="encoder")
+
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+ self.pooler = TFDeiTPooler(config, name="pooler") if add_pooling_layer else None
+
+ def get_input_embeddings(self) -> TFDeiTPatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class PreTrainedModel.
+ """
+ raise NotImplementedError
+
+ def get_head_mask(self, head_mask):
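+ # head masking is not implemented in this TF port; a list of None values disables it for every layer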
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.config.num_hidden_layers
+
+ return head_mask
+
+ @unpack_inputs
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ bool_masked_pos: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor, ...]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # TF 2.0 image layers can't use NCHW format when running on CPU.
+ # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
+ pixel_values = tf.transpose(pixel_values, (0, 2, 3, 1))
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask)
+
+ embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, training=training)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output, training=training)
+ pooled_output = self.pooler(sequence_output, training=training) if self.pooler is not None else None
+
+ if not return_dict:
+ head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
+ return head_outputs + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, self.config.hidden_size])
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+
+
+# Copied from transformers.models.vit.modeling_tf_vit.TFViTPreTrainedModel with ViT->DeiT all-casing
+class TFDeiTPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DeiTConfig
+ base_model_prefix = "deit"
+ main_input_name = "pixel_values"
+
+
+DEIT_START_DOCSTRING = r"""
+ This model is a TensorFlow
+ [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer). Use it as a regular
+ TensorFlow Module and refer to the TensorFlow documentation for all matters related to general usage and behavior.
+
+ Parameters:
+ config ([`DeiTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DEIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`DeiTImageProcessor.__call__`] for details.
+
+ head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DeiT Model transformer outputting raw hidden-states without any specific head on top.",
+ DEIT_START_DOCSTRING,
+)
+class TFDeiTModel(TFDeiTPreTrainedModel):
+ def __init__(
+ self, config: DeiTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs
+ ) -> None:
+ super().__init__(config, **kwargs)
+
+ self.deit = TFDeiTMainLayer(
+ config, add_pooling_layer=add_pooling_layer, use_mask_token=use_mask_token, name="deit"
+ )
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ bool_masked_pos: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
+ outputs = self.deit(
+ pixel_values=pixel_values,
+ bool_masked_pos=bool_masked_pos,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deit", None) is not None:
+ with tf.name_scope(self.deit.name):
+ self.deit.build(None)
+
+
+# Copied from transformers.models.vit.modeling_tf_vit.TFViTPooler with ViT->DeiT
+class TFDeiTPooler(keras.layers.Layer):
+ def __init__(self, config: DeiTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(inputs=first_token_tensor)
+
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFDeitPixelShuffle(keras.layers.Layer):
+ """TF layer implementation of torch.nn.PixelShuffle"""
+
+ def __init__(self, upscale_factor: int, **kwargs) -> None:
+ super().__init__(**kwargs)
+ if not isinstance(upscale_factor, int) or upscale_factor < 2:
+ raise ValueError(f"upscale_factor must be an integer value >= 2 got {upscale_factor}")
+ self.upscale_factor = upscale_factor
+
+ def call(self, x: tf.Tensor) -> tf.Tensor:
+ hidden_states = x
+ batch_size, _, _, num_input_channels = shape_list(hidden_states)
+ block_size_squared = self.upscale_factor**2
+ output_depth = int(num_input_channels / block_size_squared)
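+ # e.g. with upscale_factor=2, an NHWC input of shape (batch, h, w, 4 * c) is rearranged below
+ # into shape (batch, 2 * h, 2 * w, c), matching torch.nn.PixelShuffle on the equivalent NCHW tensor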
+ # When the number of output channels is >= 2, PyTorch's PixelShuffle and TF's depth_to_space
+ # differ in their output: the order in which channels are selected for combining in one op is a
+ # permutation of the order used by the other, cf.
+ # https://stackoverflow.com/questions/68272502/tf-depth-to-space-not-same-as-torchs-pixelshuffle-when-output-channels-1
+ permutation = tf.constant(
+ [[i + j * block_size_squared for i in range(block_size_squared) for j in range(output_depth)]]
+ )
+ hidden_states = tf.gather(params=hidden_states, indices=tf.tile(permutation, [batch_size, 1]), batch_dims=-1)
+ hidden_states = tf.nn.depth_to_space(hidden_states, block_size=self.upscale_factor, data_format="NHWC")
+ return hidden_states
+
+
+class TFDeitDecoder(keras.layers.Layer):
+ def __init__(self, config: DeiTConfig, **kwargs) -> None:
+ super().__init__(**kwargs)
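+ # a 1x1 convolution expands the channels to encoder_stride**2 * num_channels, then pixel shuffle
+ # rearranges them into an encoder_stride x encoder_stride block of pixels per patch position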
+ self.conv2d = keras.layers.Conv2D(
+ filters=config.encoder_stride**2 * config.num_channels, kernel_size=1, name="0"
+ )
+ self.pixel_shuffle = TFDeitPixelShuffle(config.encoder_stride, name="1")
+ self.config = config
+
+ def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = inputs
+ hidden_states = self.conv2d(hidden_states)
+ hidden_states = self.pixel_shuffle(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv2d", None) is not None:
+ with tf.name_scope(self.conv2d.name):
+ self.conv2d.build([None, None, None, self.config.hidden_size])
+ if getattr(self, "pixel_shuffle", None) is not None:
+ with tf.name_scope(self.pixel_shuffle.name):
+ self.pixel_shuffle.build(None)
+
+
+@add_start_docstrings(
+ "DeiT Model with a decoder on top for masked image modeling, as proposed in"
+ " [SimMIM](https://arxiv.org/abs/2111.09886).",
+ DEIT_START_DOCSTRING,
+)
+class TFDeiTForMaskedImageModeling(TFDeiTPreTrainedModel):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__(config)
+
+ self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, use_mask_token=True, name="deit")
+ self.decoder = TFDeitDecoder(config, name="decoder")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ bool_masked_pos: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[tuple, TFMaskedImageModelingOutput]:
+ r"""
+ bool_masked_pos (`tf.Tensor` of type bool and shape `(batch_size, num_patches)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
+
+ Returns:
+
+ Examples:
+ ```python
+ >>> from transformers import AutoImageProcessor, TFDeiTForMaskedImageModeling
+ >>> import tensorflow as tf
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
+ >>> model = TFDeiTForMaskedImageModeling.from_pretrained("facebook/deit-base-distilled-patch16-224")
+
+ >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
+ >>> pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
+ >>> # create random boolean mask of shape (batch_size, num_patches)
+ >>> bool_masked_pos = tf.cast(tf.random.uniform((1, num_patches), minval=0, maxval=2, dtype=tf.int32), tf.bool)
+
+ >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
+ >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
+ >>> list(reconstructed_pixel_values.shape)
+ [1, 3, 224, 224]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deit(
+ pixel_values,
+ bool_masked_pos=bool_masked_pos,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+
+ # Drop the first and last tokens and reshape to (batch_size, height, width, num_channels)
+ sequence_output = sequence_output[:, 1:-1]
+ batch_size, sequence_length, num_channels = shape_list(sequence_output)
+ height = width = int(sequence_length**0.5)
+ sequence_output = tf.reshape(sequence_output, (batch_size, height, width, num_channels))
+
+ # Reconstruct pixel values
+ reconstructed_pixel_values = self.decoder(sequence_output, training=training)
+ # TF 2.0 image layers can't use NCHW format when running on CPU, so intermediate layers use NHWC,
+ # including the decoder. We transpose to compute the loss against the pixel values
+ # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
+ reconstructed_pixel_values = tf.transpose(reconstructed_pixel_values, (0, 3, 1, 2))
+
+ masked_im_loss = None
+ if bool_masked_pos is not None:
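+ # upsample the patch-level mask to pixel resolution: each patch value is repeated over a
+ # patch_size x patch_size block so the L1 loss is only counted on masked pixels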
+ size = self.config.image_size // self.config.patch_size
+ bool_masked_pos = tf.reshape(bool_masked_pos, (-1, size, size))
+ mask = tf.repeat(bool_masked_pos, self.config.patch_size, 1)
+ mask = tf.repeat(mask, self.config.patch_size, 2)
+ mask = tf.expand_dims(mask, 1)
+ mask = tf.cast(mask, tf.float32)
+
+ reconstruction_loss = keras.losses.mean_absolute_error(
+ # Swap axes as metric calculation reduces over the final dimension
+ tf.transpose(pixel_values, (1, 2, 3, 0)),
+ tf.transpose(reconstructed_pixel_values, (1, 2, 3, 0)),
+ )
+ reconstruction_loss = tf.expand_dims(reconstruction_loss, 0)
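+ # average the per-pixel L1 loss over the masked pixels only (the small epsilon guards against an all-zero mask)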
+ total_loss = tf.reduce_sum(reconstruction_loss * mask)
+ num_masked_pixels = (tf.reduce_sum(mask) + 1e-5) * self.config.num_channels
+ masked_im_loss = total_loss / num_masked_pixels
+ masked_im_loss = tf.reshape(masked_im_loss, (1,))
+
+ if not return_dict:
+ output = (reconstructed_pixel_values,) + outputs[1:]
+ return ((masked_im_loss,) + output) if masked_im_loss is not None else output
+
+ return TFMaskedImageModelingOutput(
+ loss=masked_im_loss,
+ reconstruction=reconstructed_pixel_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deit", None) is not None:
+ with tf.name_scope(self.deit.name):
+ self.deit.build(None)
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
+
+
+@add_start_docstrings(
+ """
+ DeiT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
+ the [CLS] token) e.g. for ImageNet.
+ """,
+ DEIT_START_DOCSTRING,
+)
+class TFDeiTForImageClassification(TFDeiTPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: DeiTConfig):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, name="deit")
+
+ # Classifier head
+ self.classifier = (
+ keras.layers.Dense(config.num_labels, name="classifier")
+ if config.num_labels > 0
+ else keras.layers.Activation("linear", name="classifier")
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ labels: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[tf.Tensor, TFImageClassifierOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, TFDeiTForImageClassification
+ >>> import tensorflow as tf
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> keras.utils.set_random_seed(3) # doctest: +IGNORE_RESULT
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> # note: the checkpoint on the hub was trained as a TFDeiTForImageClassificationWithTeacher model,
+ >>> # so the classification head of TFDeiTForImageClassification is randomly initialized and the predictions will be random
+ >>> image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
+ >>> model = TFDeiTForImageClassification.from_pretrained("facebook/deit-base-distilled-patch16-224")
+
+ >>> inputs = image_processor(images=image, return_tensors="tf")
+ >>> outputs = model(**inputs)
+ >>> logits = outputs.logits
+ >>> # model predicts one of the 1000 ImageNet classes
+ >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]
+ >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)])
+ Predicted class: little blue heron, Egretta caerulea
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deit(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.classifier(sequence_output[:, 0, :])
+ # we don't use the distillation token
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deit", None) is not None:
+ with tf.name_scope(self.deit.name):
+ self.deit.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ DeiT Model transformer with image classification heads on top (a linear layer on top of the final hidden state of
+ the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.
+
+ .. warning::
+
+ This model supports inference only. Fine-tuning with distillation (i.e., with a teacher) is not yet
+ supported.
+ """,
+ DEIT_START_DOCSTRING,
+)
+class TFDeiTForImageClassificationWithTeacher(TFDeiTPreTrainedModel):
+ def __init__(self, config: DeiTConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, name="deit")
+
+ # Classifier heads
+ self.cls_classifier = (
+ keras.layers.Dense(config.num_labels, name="cls_classifier")
+ if config.num_labels > 0
+ else keras.layers.Activation("linear", name="cls_classifier")
+ )
+ self.distillation_classifier = (
+ keras.layers.Dense(config.num_labels, name="distillation_classifier")
+ if config.num_labels > 0
+ else keras.layers.Activation("linear", name="distillation_classifier")
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=TFDeiTForImageClassificationWithTeacherOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def call(
+ self,
+ pixel_values: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[tuple, TFDeiTForImageClassificationWithTeacherOutput]:
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deit(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+
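+ # token 0 is the [CLS] token and token 1 is the distillation token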
+ cls_logits = self.cls_classifier(sequence_output[:, 0, :])
+ distillation_logits = self.distillation_classifier(sequence_output[:, 1, :])
+
+ # during inference, return the average of both classifier predictions
+ logits = (cls_logits + distillation_logits) / 2
+
+ if not return_dict:
+ output = (logits, cls_logits, distillation_logits) + outputs[1:]
+ return output
+
+ return TFDeiTForImageClassificationWithTeacherOutput(
+ logits=logits,
+ cls_logits=cls_logits,
+ distillation_logits=distillation_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deit", None) is not None:
+ with tf.name_scope(self.deit.name):
+ self.deit.build(None)
+ if getattr(self, "cls_classifier", None) is not None:
+ with tf.name_scope(self.cls_classifier.name):
+ self.cls_classifier.build([None, None, self.config.hidden_size])
+ if getattr(self, "distillation_classifier", None) is not None:
+ with tf.name_scope(self.distillation_classifier.name):
+ self.distillation_classifier.build([None, None, self.config.hidden_size])
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3d3e0d7c51161c79c66d6e42ad8d65655c8a6e1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a12c2b112ef3e4fd5f9a8e30a4133547f77cdd5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ef4a93a84134eb250684d37aa9feed179b4ad9e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..05526bc2473204c45b57a069b3ef6a5432d6eac7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5fe872a706fc71808562e8152db4eee4ca7218f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py
@@ -0,0 +1,175 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" DINOv2 model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class Dinov2Config(BackboneConfigMixin, PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Dinov2Model`]. It is used to instantiate a
+ Dinov2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Dinov2
+ [facebook/dinov2-base](https://huggingface.co/facebook/dinov2-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ mlp_ratio (`int`, *optional*, defaults to 4):
+ Ratio of the hidden size of the MLPs relative to the `hidden_size`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ layerscale_value (`float`, *optional*, defaults to 1.0):
+ Initial value to use for layer scale.
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
+ Stochastic depth rate per sample (when applied in the main path of residual layers).
+ use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
+ Whether to use the SwiGLU feedforward neural network.
+ out_features (`List[str]`, *optional*):
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ out_indices (`List[int]`, *optional*):
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ apply_layernorm (`bool`, *optional*, defaults to `True`):
+ Whether to apply layer normalization to the feature maps in case the model is used as backbone.
+ reshape_hidden_states (`bool`, *optional*, defaults to `True`):
+ Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
+ case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
+ seq_len, hidden_size)`.
+
+ Example:
+
+ ```python
+ >>> from transformers import Dinov2Config, Dinov2Model
+
+ >>> # Initializing a Dinov2 facebook/dinov2-base style configuration
+ >>> configuration = Dinov2Config()
+
+ >>> # Initializing a model (with random weights) from the facebook/dinov2-base style configuration
+ >>> model = Dinov2Model(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "dinov2"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ mlp_ratio=4,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-6,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ qkv_bias=True,
+ layerscale_value=1.0,
+ drop_path_rate=0.0,
+ use_swiglu_ffn=False,
+ out_features=None,
+ out_indices=None,
+ apply_layernorm=True,
+ reshape_hidden_states=True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.mlp_ratio = mlp_ratio
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.qkv_bias = qkv_bias
+ self.layerscale_value = layerscale_value
+ self.drop_path_rate = drop_path_rate
+ self.use_swiglu_ffn = use_swiglu_ffn
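+ # expose the stem plus one "stage" per transformer layer so the model can be used as a backbone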
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+ )
+ self.apply_layernorm = apply_layernorm
+ self.reshape_hidden_states = reshape_hidden_states
+
+
+class Dinov2OnnxConfig(OnnxConfig):
+ torch_onnx_minimum_version = version.parse("1.11")
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py
new file mode 100644
index 0000000000000000000000000000000000000000..c25022f6ec22d8d4afa1f926af9eb6e4d03adb35
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py
@@ -0,0 +1,856 @@
+# coding=utf-8
+# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch DINOv2 model."""
+
+
+import collections.abc
+import math
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BackboneOutput,
+ BaseModelOutput,
+ BaseModelOutputWithPooling,
+ ImageClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.backbone_utils import BackboneMixin
+from .configuration_dinov2 import Dinov2Config
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "Dinov2Config"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/dinov2-base"
+_EXPECTED_OUTPUT_SHAPE = [1, 257, 768]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "facebook/dinov2-small-imagenet1k-1-layer"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+
+from ..deprecated._archive_maps import DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class Dinov2Embeddings(nn.Module):
+ """
+ Construct the CLS token, mask token, position and patch embeddings.
+ """
+
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+
+ self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
+ self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
+ self.patch_embeddings = Dinov2PatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+ This method interpolates the pre-trained position encodings so that the model can be used on
+ higher-resolution images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
+ num_patches = embeddings.shape[1] - 1
+ num_positions = self.position_embeddings.shape[1] - 1
+ if num_patches == num_positions and height == width:
+ return self.position_embeddings
+ class_pos_embed = self.position_embeddings[:, 0]
+ patch_pos_embed = self.position_embeddings[:, 1:]
+ dim = embeddings.shape[-1]
+ height = height // self.config.patch_size
+ width = width // self.config.patch_size
+ # we add a small number to avoid floating point error in the interpolation
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
+ height, width = height + 0.1, width + 0.1
+ patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+ target_dtype = patch_pos_embed.dtype
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed.to(dtype=torch.float32),
+ scale_factor=(float(height / math.sqrt(num_positions)), float(width / math.sqrt(num_positions))),
+ mode="bicubic",
+ align_corners=False,
+ ).to(dtype=target_dtype)
+ if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
+ raise ValueError("Width or height does not match with the interpolated position embeddings")
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
+
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
+ batch_size, _, height, width = pixel_values.shape
+ target_dtype = self.patch_embeddings.projection.weight.dtype
+ embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
+
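+ # replace the embeddings of masked patches with the learnable mask token (used for masked image modeling)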
+ if bool_masked_pos is not None:
+ embeddings = torch.where(
+ bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
+ )
+
+ # add the [CLS] token to the embedded patch tokens
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+ # add positional encoding to each token
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+class Dinov2PatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
+ num_channels = pixel_values.shape[1]
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ f" Expected {self.num_channels} but got {num_channels}."
+ )
+ embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
+ return embeddings
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Dinov2
+class Dinov2SelfAttention(nn.Module):
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Dinov2
+class Dinov2SelfOutput(nn.Module):
+ """
+ The residual connection is defined in Dinov2Layer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Dinov2
+class Dinov2Attention(nn.Module):
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+ self.attention = Dinov2SelfAttention(config)
+ self.output = Dinov2SelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads: Set[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
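+# Per-channel learnable scaling of each residual branch (LayerScale, introduced in CaiT);
+# DINOv2 initializes the scale to config.layerscale_value.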
+class Dinov2LayerScale(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ return hidden_state * self.lambda1
+
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
+
+# Copied from transformers.models.beit.modeling_beit.BeitDropPath
+class Dinov2DropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class Dinov2MLP(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ in_features = out_features = config.hidden_size
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
+ if isinstance(config.hidden_act, str):
+ self.activation = ACT2FN[config.hidden_act]
+ else:
+ self.activation = config.hidden_act
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.fc1(hidden_state)
+ hidden_state = self.activation(hidden_state)
+ hidden_state = self.fc2(hidden_state)
+ return hidden_state
+
+
+class Dinov2SwiGLUFFN(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ in_features = out_features = config.hidden_size
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
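+ # scale the hidden width by 2/3 (SwiGLU splits weights_in into a gate and a value projection)
+ # to keep the parameter count close to a standard MLP, then round up to a multiple of 8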
+ hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
+
+ self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
+ self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.weights_in(hidden_state)
+ x1, x2 = hidden_state.chunk(2, dim=-1)
+ hidden = nn.functional.silu(x1) * x2
+ return self.weights_out(hidden)
+
+
+class Dinov2Layer(nn.Module):
+ """This corresponds to the Block class in the original implementation."""
+
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.attention = Dinov2Attention(config)
+ self.layer_scale1 = Dinov2LayerScale(config)
+ self.drop_path = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
+
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ if config.use_swiglu_ffn:
+ self.mlp = Dinov2SwiGLUFFN(config)
+ else:
+ self.mlp = Dinov2MLP(config)
+ self.layer_scale2 = Dinov2LayerScale(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_attention_outputs = self.attention(
+ self.norm1(hidden_states), # in Dinov2, layernorm is applied before self-attention
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+
+ attention_output = self.layer_scale1(attention_output)
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection
+ hidden_states = self.drop_path(attention_output) + hidden_states
+
+ # in Dinov2, layernorm is also applied after self-attention
+ layer_output = self.norm2(hidden_states)
+ layer_output = self.mlp(layer_output)
+ layer_output = self.layer_scale2(layer_output)
+
+ # second residual connection
+ layer_output = self.drop_path(layer_output) + hidden_states
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->Dinov2
+class Dinov2Encoder(nn.Module):
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([Dinov2Layer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class Dinov2PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = Dinov2Config
+ base_model_prefix = "dinov2"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
+ # `trunc_normal_cpu` not implemented in `half` issues
+ module.weight.data = nn.init.trunc_normal_(
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
+ ).to(module.weight.dtype)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, Dinov2Embeddings):
+ module.position_embeddings.data = nn.init.trunc_normal_(
+ module.position_embeddings.data.to(torch.float32),
+ mean=0.0,
+ std=self.config.initializer_range,
+ ).to(module.position_embeddings.dtype)
+
+ module.cls_token.data = nn.init.trunc_normal_(
+ module.cls_token.data.to(torch.float32),
+ mean=0.0,
+ std=self.config.initializer_range,
+ ).to(module.cls_token.dtype)
+
+
+DINOV2_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`Dinov2Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DINOV2_BASE_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`BitImageProcessor.preprocess`] for details.
+
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
+ pre-training.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+DINOV2_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`BitImageProcessor.preprocess`] for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.",
+ DINOV2_START_DOCSTRING,
+)
+class Dinov2Model(Dinov2PreTrainedModel):
+ def __init__(self, config: Dinov2Config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = Dinov2Embeddings(config)
+ self.encoder = Dinov2Encoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
+ class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(DINOV2_BASE_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ bool_masked_pos: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
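+ # the "pooled" output is simply the final [CLS] token representation; there is no extra pooling head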
+ pooled_output = sequence_output[:, 0, :]
+
+ if not return_dict:
+ head_outputs = (sequence_output, pooled_output)
+ return head_outputs + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state
+ of the [CLS] token concatenated with the average of the patch tokens) e.g. for ImageNet.
+ """,
+ DINOV2_START_DOCSTRING,
+)
+class Dinov2ForImageClassification(Dinov2PreTrainedModel):
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.dinov2 = Dinov2Model(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.dinov2(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0] # batch_size, sequence_length, hidden_size
+
+ cls_token = sequence_output[:, 0]
+ patch_tokens = sequence_output[:, 1:]
+
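+ # concatenate the [CLS] token with the mean-pooled patch tokens, which is why the classifier
+ # expects an input of size 2 * hidden_size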
+ linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
+
+ logits = self.classifier(linear_input)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Dinov2 backbone, to be used with frameworks like DETR and MaskFormer.
+ """,
+ DINOV2_START_DOCSTRING,
+)
+class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin):
+ def __init__(self, config):
+ super().__init__(config)
+ super()._init_backbone(config)
+
+ self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
+ self.embeddings = Dinov2Embeddings(config)
+ self.encoder = Dinov2Encoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
+ >>> model = AutoBackbone.from_pretrained(
+ ... "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"]
+ ... )
+
+ >>> inputs = processor(image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> feature_maps = outputs.feature_maps
+ >>> list(feature_maps[-1].shape)
+ [1, 768, 16, 16]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ embedding_output = self.embeddings(pixel_values)
+
+ outputs = self.encoder(
+ embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
+ )
+
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ feature_maps = ()
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
+ if stage in self.out_features:
+ if self.config.apply_layernorm:
+ hidden_state = self.layernorm(hidden_state)
+ if self.config.reshape_hidden_states:
+ hidden_state = hidden_state[:, 1:]
+ # this was actually a bug in the original implementation that we copied here,
+ # because normally the order is height, width
+ batch_size, _, height, width = pixel_values.shape
+ patch_size = self.config.patch_size
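+ # (batch_size, num_patches, hidden_size) -> (batch_size, height // patch_size, width // patch_size, hidden_size),
+ # then permute to channels-first (batch_size, hidden_size, h, w) as expected by downstream frameworks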
+ hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
+ feature_maps += (hidden_state,)
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (feature_maps,) + outputs[1:]
+ else:
+ output = (feature_maps,) + outputs[2:]
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=outputs.attentions if output_attentions else None,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33660eb81e4faebb7938bbba7ba165a2d7079d81
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_gpt_bigcode"] = [
+ "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "GPTBigCodeForSequenceClassification",
+ "GPTBigCodeForTokenClassification",
+ "GPTBigCodeForCausalLM",
+ "GPTBigCodeModel",
+ "GPTBigCodePreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_gpt_bigcode import (
+ GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
+ GPTBigCodeForCausalLM,
+ GPTBigCodeForSequenceClassification,
+ GPTBigCodeForTokenClassification,
+ GPTBigCodeModel,
+ GPTBigCodePreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f0e9259873567c1a54ae6f585b29f51e001555e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35f93c2d4ac5d1b3429caf90072e9d079a721b8a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c6ece5ffbc0efccd2d5e0d2562a1585a806b6901
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea0f7752ed0cac8d76812a4075bd6217d0db33a6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/__init__.py
@@ -0,0 +1,68 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_nllb_moe": [
+ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "NllbMoeConfig",
+ ]
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_nllb_moe"] = [
+ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "NllbMoeForConditionalGeneration",
+ "NllbMoeModel",
+ "NllbMoePreTrainedModel",
+ "NllbMoeTop2Router",
+ "NllbMoeSparseMLP",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_nllb_moe import (
+ NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ NllbMoeConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_nllb_moe import (
+ NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
+ NllbMoeForConditionalGeneration,
+ NllbMoeModel,
+ NllbMoePreTrainedModel,
+ NllbMoeSparseMLP,
+ NllbMoeTop2Router,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/configuration_nllb_moe.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/configuration_nllb_moe.py
new file mode 100644
index 0000000000000000000000000000000000000000..48172824ff2425d224b36d2589b304c238e3bae0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/configuration_nllb_moe.py
@@ -0,0 +1,218 @@
+# coding=utf-8
+# Copyright 2023, HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" NLLB-MoE model configuration"""
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class NllbMoeConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`NllbMoeModel`]. It is used to instantiate an
+ NLLB-MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the NLLB-MoE
+ [facebook/nllb-moe-54b](https://huggingface.co/facebook/nllb-moe-54b) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 128112):
+ Vocabulary size of the NllbMoe model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`NllbMoeModel`].
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for classifier.
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
+ for more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
+ for more details.
+ second_expert_policy (`str`, *optional*, defaults to `"all"`):
+ The policy used to decide whether and how each token is routed to a second expert; one of `"all"`, `"sampling"` or `"random"`.
+ normalize_router_prob_before_dropping (`bool`, *optional*, defaults to `True`):
+ Whether or not to normalize the router probabilities before applying a mask based on the experts capacity
+ (capacity dropping).
+ batch_prioritized_routing (`bool`, *optional*, defaults to `True`):
+ Whether or not to order the tokens by their router probabilities before capacity dropping. This means that
+ tokens with the highest probabilities are routed before other tokens that might appear later in
+ the sequence.
+ moe_eval_capacity_token_fraction (`float`, *optional*, defaults to 1.0):
+ Fraction of tokens used as expert capacity during validation; if set to a negative value, the same capacity
+ as during training is used. Should be in the range (0.0, 1.0].
+ num_experts (`int`, *optional*, defaults to 128):
+ Number of experts for each NllbMoeSparseMlp layer.
+ expert_capacity (`int`, *optional*, defaults to 64):
+ Number of tokens that can be stored in each expert.
+ encoder_sparse_step (`int`, *optional*, defaults to 4):
+ Frequency of the sparse layers in the encoder. 4 means that one out of 4 layers will be sparse.
+ decoder_sparse_step (`int`, *optional*, defaults to 4):
+ Frequency of the sparse layers in the decoder. 4 means that one out of 4 layers will be sparse.
+ router_dtype (`str`, *optional*, defaults to `"float32"`):
+ The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
+ *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961).
+ router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
+ Whether to ignore padding tokens when routing. If `False`, padding tokens are masked out of the routing
+ masks and are not routed to any expert.
+ router_bias (`bool`, *optional*, defaults to `False`):
+ Whether or not the classifier of the router should have a bias.
+ moe_token_dropout (`float`, *optional*, defaults to 0.2):
+ Masking rate for MoE expert output masking (EOM), which is implemented via a Dropout2d on the expert
+ outputs.
+ output_router_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the router logits. Only set to `True` to get the auxiliary loss when training.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+
+ Example:
+
+ ```python
+ >>> from transformers import NllbMoeModel, NllbMoeConfig
+
+ >>> # Initializing a NllbMoe facebook/nllb-moe-54b style configuration
+ >>> configuration = NllbMoeConfig()
+
+ >>> # Initializing a model from the facebook/nllb-moe-54b style configuration
+ >>> model = NllbMoeModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "nllb-moe"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ def __init__(
+ self,
+ vocab_size=128112,
+ max_position_embeddings=1024,
+ encoder_layers=12,
+ encoder_ffn_dim=4096,
+ encoder_attention_heads=16,
+ decoder_layers=12,
+ decoder_ffn_dim=4096,
+ decoder_attention_heads=16,
+ encoder_layerdrop=0.05,
+ decoder_layerdrop=0.05,
+ use_cache=True,
+ is_encoder_decoder=True,
+ activation_function="relu",
+ d_model=1024,
+ dropout=0.1,
+ attention_dropout=0.1,
+ activation_dropout=0.0,
+ init_std=0.02,
+ decoder_start_token_id=2,
+ scale_embedding=True,
+ router_bias=False,
+ router_dtype="float32",
+ router_ignore_padding_tokens=False,
+ num_experts=128,
+ expert_capacity=64,
+ encoder_sparse_step=4,
+ decoder_sparse_step=4,
+ router_z_loss_coef=0.001,
+ router_aux_loss_coef=0.001,
+ second_expert_policy="all",
+ normalize_router_prob_before_dropping=False,
+ batch_prioritized_routing=False,
+ moe_eval_capacity_token_fraction=1.0,
+ moe_token_dropout=0.2,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ output_router_logits=False,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ self.router_z_loss_coef = router_z_loss_coef
+ self.router_aux_loss_coef = router_aux_loss_coef
+ self.decoder_sparse_step = decoder_sparse_step
+ self.encoder_sparse_step = encoder_sparse_step
+ self.num_experts = num_experts
+ self.expert_capacity = expert_capacity
+ self.router_bias = router_bias
+ if router_dtype not in ["float32", "float16", "bfloat16"]:
+ raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
+ self.router_dtype = router_dtype
+
+ self.router_ignore_padding_tokens = router_ignore_padding_tokens
+ self.batch_prioritized_routing = batch_prioritized_routing
+ self.second_expert_policy = second_expert_policy
+ self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
+ self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
+ self.moe_token_dropout = moe_token_dropout
+ self.output_router_logits = output_router_logits
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f98c0ca3d92e038311568613603208259967567
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/convert_nllb_moe_sharded_original_checkpoint_to_pytorch.py
@@ -0,0 +1,160 @@
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import json
+import os
+
+import torch
+from torch import nn
+
+from transformers import NllbMoeConfig, NllbMoeModel
+from transformers.modeling_utils import dtype_byte_size
+from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
+
+
+def remove_ignore_keys_(state_dict):
+ ignore_keys = [
+ "encoder.version",
+ "decoder.version",
+ "model.encoder.version",
+ "model.decoder.version",
+ "decoder.output_projection.weight",
+ "_float_tensor",
+ "encoder.embed_positions._float_tensor",
+ "decoder.embed_positions._float_tensor",
+ ]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def make_linear_from_emb(emb):
+ vocab_size, emb_size = emb.weight.shape
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
+ lin_layer.weight.data = emb.weight.data
+ return lin_layer
+
+
+def rename_fairseq_keys(state_dict, expert_idx=None):
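+ # Map fairseq parameter names to the transformers naming scheme. For example (illustrative),
+ # "layers.0.moe_layer.experts.0.fc1.weight" becomes "layers.0.ffn.experts.expert_3.fc1.weight" when expert_idx=3.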
+ new_dict = {}
+ for old_key in state_dict.keys():
+ key = old_key
+ if "moe_layer.experts." in key:
+ if expert_idx is not None:
+ key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
+ else:
+ key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
+ if "gate" in key:
+ key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
+ if "fc2" and "experts" not in key:
+ key = key.replace(".fc2.", ".ffn.fc2.")
+ if "fc1" and "experts" not in key:
+ key = key.replace(".fc1.", ".ffn.fc1.")
+ if ".encoder_attn." in key:
+ key = key.replace(".encoder_attn.", ".cross_attention.")
+ if "encoder_attn_layer_norm" in key:
+ key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
+ if "final_layer_norm" in key:
+ key = key.replace("final_layer_norm", "ff_layer_norm")
+ new_dict[key] = state_dict[old_key]
+ return new_dict
+
+
+def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
+ sharded_state_dicts = []
+ total_size = 0
+ os.makedirs(dump_path, exist_ok=True)
+
+ for expert in range(num_experts):
+ expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
+ if os.path.isfile(expert_path):
+ expert_state = torch.load(expert_path)["model"]
+ remove_ignore_keys_(expert_state)
+ expert_state = rename_fairseq_keys(expert_state, expert)
+ save_path = os.path.join(
+ dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
+ )
+ torch.save(expert_state, save_path)
+ sharded_state_dicts.append(expert_state.keys())
+ total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
+ expert_state[list(expert_state)[0]].dtype
+ )
+
+ # Add the last block
+ save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
+ shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
+ remove_ignore_keys_(shared_weights)
+ shared_weights = rename_fairseq_keys(shared_weights, None)
+ shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
+ sharded_state_dicts.append(shared_weights.keys())
+
+ # If we only have the shared weights (dummy model/experts saved on the same file)
+ if len(sharded_state_dicts) == 1:
+ save_path = os.path.join(dump_path, weights_name)
+ torch.save(shared_weights, save_path)
+ return {weights_name: sharded_state_dicts[0]}, None
+ else:
+ torch.save(shared_weights, save_path)
+ # Otherwise, let's build the index
+ weight_map = {}
+ for idx, shard in enumerate(sharded_state_dicts):
+ shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
+ temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
+ os.rename(temp_filename, os.path.join(dump_path, shard_file))
+ for key in shard:
+ weight_map[key] = shard_file
+
+ # Add the metadata
+ metadata = {"total_size": total_size}
+ index = {"metadata": metadata, "weight_map": weight_map}
+
+ with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+ return metadata, index
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--nllb_moe_checkpoint_path",
+ default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
+ type=str,
+ required=False,
+ help="Path to a directory containing a folder per layer. Follows the original Google format.",
+ )
+ parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
+ type=str,
+ required=False,
+ help="Path to the output pytorch model.",
+ )
+ args = parser.parse_args()
+ metadata, index = shard_on_the_fly(
+ args.nllb_moe_checkpoint_path,
+ args.pytorch_dump_folder_path,
+ 128,
+ args.dtype,
+ )
+
+ config = NllbMoeConfig.from_pretrained(
+ "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
+ )
+ config.save_pretrained(args.pytorch_dump_folder_path)
+ model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
+ print("Done")
+ model.save_pretrained(args.pytorch_dump_folder_path)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/modeling_nllb_moe.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/modeling_nllb_moe.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ef66b7bd5740c5def72af038ae832c445089b09
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/nllb_moe/modeling_nllb_moe.py
@@ -0,0 +1,1792 @@
+# coding=utf-8
+# Copyright 2023 NllbMoe Authors and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch NLLB-MoE model."""
+
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ MoEModelOutput,
+ MoEModelOutputWithPastAndCrossAttentions,
+ Seq2SeqMoEModelOutput,
+ Seq2SeqMoEOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_nllb_moe import NllbMoeConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "NllbMoeConfig"
+_CHECKPOINT_FOR_DOC = "hf-internal-testing/dummy-nllb-moe-2-experts"
+_REAL_CHECKPOINT_FOR_DOC = "facebook/nllb-moe-54b"
+
+
+####################################################
+# This dict contains ids and associated url
+# for the pretrained weights provided with the models
+####################################################
+
+from ..deprecated._archive_maps import NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
+
+# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
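+ # Worked example (illustrative): input_ids = [[5, 6, 1, 1]] with padding_idx=1 gives position_ids = [[2, 3, 1, 1]].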
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
+
+
+def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float:
+ r"""
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
+
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+ experts is too unbalanced.
+
+ Args:
+ router_probs (`torch.Tensor`):
+ Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
+ expert_indices (`torch.Tensor`):
+ Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.
+
+ Returns:
+ The auxiliary loss.
+ """
+ if router_probs is None:
+ return 0
+
+ num_experts = router_probs.shape[-1]
+
+ # cast the expert indices to int64, otherwise one-hot encoding will fail
+ if expert_indices.dtype != torch.int64:
+ expert_indices = expert_indices.to(torch.int64)
+
+ if len(expert_indices.shape) == 2:
+ expert_indices = expert_indices.unsqueeze(2)
+
+ expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)
+
+ # For a given token, determine if it was routed to a given expert.
+ expert_mask = torch.max(expert_mask, axis=-2).values
+
+ # cast to float32 otherwise mean will fail
+ expert_mask = expert_mask.to(torch.float32)
+ tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)
+
+ router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
+ return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2)
+
+
+# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding
+class NllbMoeSinusoidalPositionalEmbedding(nn.Module):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ super().__init__()
+ self.offset = 2
+ self.embedding_dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
+
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
+ if hasattr(self, "weights"):
+ # in forward put the weights on the correct dtype and device of the param
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
+
+ self.register_buffer("weights", emb_weights, persistent=False)
+
+ @staticmethod
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ """
+ Build sinusoidal embeddings.
+
+ This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
+ "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ if padding_idx is not None:
+ emb[padding_idx, :] = 0
+
+ return emb.to(torch.get_default_dtype())
+
+ @torch.no_grad()
+ def forward(
+ self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0
+ ):
+ if input_ids is not None:
+ bsz, seq_len = input_ids.size()
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
+ input_ids.device
+ )
+ else:
+ bsz, seq_len = inputs_embeds.size()[:-1]
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)
+
+ # expand embeddings if needed
+ max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
+ if max_pos > self.weights.size(0):
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
+
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
+ """
+ We are provided embeddings directly. We cannot infer which tokens are padded, so we just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
+
+
+class NllbMoeTop2Router(nn.Module):
+ """
+ Router using tokens choose top-2 experts assignment.
+
+ This router uses the same mechanism as in NLLB-MoE from the fairseq repository. Items are sorted by router_probs
+ and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee
+ that each token is processed by an expert**, or that each expert receives at least one token.
+
+ The router combining weights are also returned to make sure that the states that are not updated will be masked.
+
+ """
+
+ def __init__(self, config: NllbMoeConfig):
+ super().__init__()
+ self.num_experts = config.num_experts
+ self.expert_capacity = config.expert_capacity
+ self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
+ self.router_ignore_padding_tokens = config.router_ignore_padding_tokens
+ self.dtype = getattr(torch, config.router_dtype)
+
+ self.second_expert_policy = config.second_expert_policy
+ self.normalize_router_prob_before_dropping = config.normalize_router_prob_before_dropping
+ self.batch_prioritized_routing = config.batch_prioritized_routing
+ self.moe_eval_capacity_token_fraction = config.moe_eval_capacity_token_fraction
+
+ def _cast_classifier(self):
+ r"""
+ `bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore, we need to check whether they are an
+ instance of the `Linear8bitLt` class by checking its special attributes.
+ """
+ if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")):
+ self.classifier = self.classifier.to(self.dtype)
+
+ def normalize_router_probabilities(self, router_probs, top_1_mask, top_2_mask):
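+ # renormalize so that, for each token, the probabilities of its top-1 and top-2 experts sum to 1
+ # (the denominator is clamped to avoid division by zero)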
+ top_1_max_probs = (router_probs * top_1_mask).sum(dim=1)
+ top_2_max_probs = (router_probs * top_2_mask).sum(dim=1)
+ denom_s = torch.clamp(top_1_max_probs + top_2_max_probs, min=torch.finfo(router_probs.dtype).eps)
+ top_1_max_probs = top_1_max_probs / denom_s
+ top_2_max_probs = top_2_max_probs / denom_s
+ return top_1_max_probs, top_2_max_probs
+
+ def route_tokens(
+ self,
+ router_logits: torch.Tensor,
+ input_dtype: torch.dtype = torch.float32,
+ padding_mask: Optional[torch.LongTensor] = None,
+ ) -> Tuple:
+ """
+ Computes the `dispatch_mask` and the `dispatch_weights` for each experts. The masks are adapted to the expert
+ capacity.
+ """
+ nb_tokens = router_logits.shape[0]
+ # Apply Softmax and cast back to the original `dtype`
+ router_probs = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(input_dtype)
+ top_1_expert_index = torch.argmax(router_probs, dim=-1)
+ top_1_mask = torch.nn.functional.one_hot(top_1_expert_index, num_classes=self.num_experts)
+
+ if self.second_expert_policy == "sampling":
+ gumbel = torch.distributions.gumbel.Gumbel(0, 1).rsample
+ router_logits += gumbel(router_logits.shape).to(router_logits.device)
+
+ # mask the logits of the top-1 expert so that the second argmax picks a different expert
+ logits_except_top_1 = router_logits.masked_fill(top_1_mask.bool(), float("-inf"))
+ top_2_expert_index = torch.argmax(logits_except_top_1, dim=-1)
+ top_2_mask = torch.nn.functional.one_hot(top_2_expert_index, num_classes=self.num_experts)
+
+ if self.normalize_router_prob_before_dropping:
+ top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(
+ router_probs, top_1_mask, top_2_mask
+ )
+
+ if self.second_expert_policy == "random":
+ top_2_max_probs = (router_probs * top_2_mask).sum(dim=1)
+ sampled = (2 * top_2_max_probs) > torch.rand_like(top_2_max_probs.float())
+ top_2_mask = top_2_mask * sampled.repeat(self.num_experts, 1).transpose(1, 0)
+
+ if padding_mask is not None and not self.router_ignore_padding_tokens:
+ if len(padding_mask.shape) == 4:
+ # only get the last causal mask
+ padding_mask = padding_mask[:, :, -1, :].reshape(-1)[-nb_tokens:]
+ non_padding = ~padding_mask.bool()
+ top_1_mask = top_1_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype)
+ top_2_mask = top_2_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype)
+
+ if self.batch_prioritized_routing:
+ # sort tokens based on their routing probability
+ # to make sure important tokens are routed, first
+ importance_scores = -1 * router_probs.max(dim=1)[0]
+ sorted_top_1_mask = top_1_mask[importance_scores.argsort(dim=0)]
+ sorted_cumsum1 = (torch.cumsum(sorted_top_1_mask, dim=0) - 1) * sorted_top_1_mask
+ locations1 = sorted_cumsum1[importance_scores.argsort(dim=0).argsort(dim=0)]
+
+ sorted_top_2_mask = top_2_mask[importance_scores.argsort(dim=0)]
+ sorted_cumsum2 = (torch.cumsum(sorted_top_2_mask, dim=0) - 1) * sorted_top_2_mask
+ locations2 = sorted_cumsum2[importance_scores.argsort(dim=0).argsort(dim=0)]
+ # Update 2nd's location by accounting for locations of 1st
+ locations2 += torch.sum(top_1_mask, dim=0, keepdim=True)
+
+ else:
+ locations1 = torch.cumsum(top_1_mask, dim=0) - 1
+ locations2 = torch.cumsum(top_2_mask, dim=0) - 1
+ # Update 2nd's location by accounting for locations of 1st
+ locations2 += torch.sum(top_1_mask, dim=0, keepdim=True)
+
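+ # At evaluation time the capacity can be derived from `moe_eval_capacity_token_fraction`; otherwise, when no
+ # explicit `expert_capacity` is set, it defaults to 2 * ceil(num_tokens / num_experts) since every token may be
+ # routed to up to two experts.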
+ if not self.training and self.moe_eval_capacity_token_fraction > 0:
+ self.expert_capacity = math.ceil(self.moe_eval_capacity_token_fraction * nb_tokens)
+ else:
+ capacity = 2 * math.ceil(nb_tokens / self.num_experts)
+ self.expert_capacity = capacity if self.expert_capacity is None else self.expert_capacity
+
+ # Drop tokens whose position within an expert exceeds its capacity (locations >= capacity are not routed)
+ top_1_mask = top_1_mask * torch.lt(locations1, self.expert_capacity)
+ top_2_mask = top_2_mask * torch.lt(locations2, self.expert_capacity)
+
+ if not self.normalize_router_prob_before_dropping:
+ top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(
+ router_probs, top_1_mask, top_2_mask
+ )
+
+ # Calculate combine_weights and dispatch_mask
+ gates1 = top_1_max_probs[:, None] * top_1_mask
+ gates2 = top_2_max_probs[:, None] * top_2_mask
+ router_probs = gates1 + gates2
+
+ return top_1_mask, router_probs
+
+ def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.LongTensor] = None) -> Tuple:
+ r"""
+ The hidden states are reshaped to simplify the computation of the router probabilities (combining weights for
+ each experts.)
+
+ Args:
+ hidden_states (`torch.Tensor`):
+ (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
+ Returns:
+ top_1_mask (`torch.Tensor` of shape (batch_size, sequence_length)):
+ Index tensor of shape [batch_size, sequence_length] corresponding to the expert selected for each token
+ using the top1 probabilities of the router.
+ router_probabilities (`torch.Tensor` of shape (batch_size, sequence_length, num_experts)):
+ Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
+ token and expert. Used for routing tokens to experts.
+ router_logits (`torch.Tensor` of shape (batch_size, sequence_length, num_experts)):
+ Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
+ This is used later for computing router z-loss.
+ """
+ self.input_dtype = hidden_states.dtype
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
+ hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim)
+ hidden_states = hidden_states.to(self.dtype)
+ self._cast_classifier()
+ router_logits = self.classifier(hidden_states)
+ top_1_mask, router_probs = self.route_tokens(router_logits, self.input_dtype, padding_mask)
+ return top_1_mask, router_probs
+
+
+class NllbMoeDenseActDense(nn.Module):
+ def __init__(self, config: NllbMoeConfig, ffn_dim: int):
+ super().__init__()
+ self.fc1 = nn.Linear(config.d_model, ffn_dim)
+ self.fc2 = nn.Linear(ffn_dim, config.d_model)
+ self.dropout = nn.Dropout(config.activation_dropout)
+ self.act = ACT2FN[config.activation_function]
+
+ def forward(self, hidden_states):
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states)
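+ # `fc2` may hold weights in a different dtype (e.g. when the model is loaded in half precision or
+ # with int8/uint8 quantized weights); only cast the activations when the target dtype is a regular
+ # floating-point type.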
+ if (
+ isinstance(self.fc2.weight, torch.Tensor)
+ and hidden_states.dtype != self.fc2.weight.dtype
+ and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8)
+ ):
+ hidden_states = hidden_states.to(self.fc2.weight.dtype)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+class NllbMoeSparseMLP(nn.Module):
+ r"""
+ Implementation of the NLLB-MoE sparse MLP module.
+ """
+
+ def __init__(self, config: NllbMoeConfig, ffn_dim: int, expert_class: nn.Module = NllbMoeDenseActDense):
+ super().__init__()
+ self.router = NllbMoeTop2Router(config)
+ self.moe_token_dropout = config.moe_token_dropout
+ self.token_dropout = nn.Dropout(self.moe_token_dropout)
+ self.num_experts = config.num_experts
+
+ self.experts = nn.ModuleDict()
+ for idx in range(self.num_experts):
+ self.experts[f"expert_{idx}"] = expert_class(config, ffn_dim)
+
+ def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.Tensor] = False):
+ r"""
+ The goal of this forward pass is to have the same number of operations as the equivalent
+ `NllbMoeDenseActDense` (mlp) layer. This means that all of the hidden states should be processed at most
+ twice (since we are using a top-2 gating mechanism). This keeps the complexity at
+ O(batch_size x sequence_length x hidden_dim) instead of O(num_experts x batch_size x sequence_length x
+ hidden_dim).
+
+ 1- Get the `router_probs` from the `router`. The shape of `router_mask` is `(batch_size * sequence_length,
+ num_experts)` and corresponds to the boolean version of `router_probs`. The inputs are masked using
+ `router_mask`.
+
+ 2- Dispatch the hidden_states to their associated experts. The router probabilities are used to weight the
+ contribution of each expert when updating the masked hidden states.
+
+ Args:
+ hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`):
+ The hidden states
+ padding_mask (`torch.Tensor`, *optional*, defaults to `False`):
+ Attention mask. Can be in the causal form or not.
+
+ Returns:
+ hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`):
+ Updated hidden states
+ router_logits (`torch.Tensor` of shape `(batch_size, sequence_length, num_experts)`):
+ Needed for computing the loss
+
+ """
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
+
+ top_1_mask, router_probs = self.router(hidden_states, padding_mask)
+ router_mask = router_probs.bool()
+ hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim)
+ masked_hidden_states = torch.einsum("bm,be->ebm", hidden_states, router_mask)
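+ # `masked_hidden_states` has shape (num_experts, batch_size * sequence_length, hidden_dim):
+ # one copy of the token states per expert, zeroed wherever `router_mask` is False.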
+ for idx, expert in enumerate(self.experts.values()):
+ token_indices = router_mask[:, idx]
+ combining_weights = router_probs[token_indices, idx]
+ expert_output = expert(masked_hidden_states[idx, token_indices])
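+ # During training, `token_dropout` randomly zeroes elements of the expert output;
+ # at inference the output is scaled by (1 - moe_token_dropout) instead.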
+ if self.moe_token_dropout > 0:
+ if self.training:
+ expert_output = self.token_dropout(expert_output)
+ else:
+ expert_output *= 1 - self.moe_token_dropout
+ masked_hidden_states[idx, token_indices] = torch.einsum("b,be->be", combining_weights, expert_output)
+ hidden_states = masked_hidden_states.sum(dim=0).reshape(batch_size, sequence_length, hidden_dim)
+
+ top_1_expert_index = torch.argmax(top_1_mask, dim=-1)
+ return hidden_states, (router_probs, top_1_expert_index)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->NllbMoe,key_value_states->encoder_hidden_states
+class NllbMoeAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[NllbMoeConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if encoder_hidden_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = encoder_hidden_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == encoder_hidden_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `encoder_hidden_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == encoder_hidden_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
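+ # Fold the head dimension into the batch dimension: queries become (bsz * num_heads, tgt_len, head_dim)
+ # and keys/values (bsz * num_heads, src_len, head_dim), so the attention scores can be computed with a
+ # single batched matmul.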
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+class NllbMoeEncoderLayer(nn.Module):
+ def __init__(self, config: NllbMoeConfig, is_sparse: bool = False):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.is_sparse = is_sparse
+ self.self_attn = NllbMoeAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.attn_dropout = nn.Dropout(config.dropout)
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ if not self.is_sparse:
+ self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.encoder_ffn_dim)
+ else:
+ self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.encoder_ffn_dim)
+ self.ff_layer_norm = nn.LayerNorm(config.d_model)
+ self.ff_dropout = nn.Dropout(config.activation_dropout)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ layer_head_mask: torch.Tensor,
+ output_attentions: bool = False,
+ output_router_logits: bool = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`):
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
+ large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.attn_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+
+ hidden_states = self.ff_layer_norm(hidden_states)
+ if self.is_sparse:
+ hidden_states, router_states = self.ffn(hidden_states, attention_mask)
+ else:
+ # router_states is set to None for dense layers so downstream code can tell which layers did not use a router.
+ hidden_states, router_states = self.ffn(hidden_states), None
+
+ hidden_states = self.ff_dropout(hidden_states)
+
+ hidden_states = residual + hidden_states
+
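+ # clamp extreme values to avoid inf/nan activations when running in fp16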
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ if output_router_logits:
+ outputs += (router_states,)
+
+ return outputs
+
+
+class NllbMoeDecoderLayer(nn.Module):
+ def __init__(self, config: NllbMoeConfig, is_sparse: bool = False):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.is_sparse = is_sparse
+ self.self_attn = NllbMoeAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.attn_dropout = nn.Dropout(config.dropout)
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.cross_attention = NllbMoeAttention(
+ self.embed_dim, config.decoder_attention_heads, config.attention_dropout, is_decoder=True
+ )
+ self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim)
+ if not self.is_sparse:
+ self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.decoder_ffn_dim)
+ else:
+ self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.decoder_ffn_dim)
+ self.ff_layer_norm = nn.LayerNorm(config.d_model)
+ self.ff_dropout = nn.Dropout(config.activation_dropout)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ output_router_logits: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`):
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
+ large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`):
+ encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
+ very large negative values.
+ layer_head_mask (`torch.FloatTensor`):
+ mask for attention heads in a given layer of size `(decoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`):
+ mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`):
+ cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.attn_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.cross_attention_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention(
+ hidden_states=hidden_states,
+ encoder_hidden_states=encoder_hidden_states,
+ past_key_value=cross_attn_past_key_value,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.attn_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value += cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+
+ hidden_states = self.ff_layer_norm(hidden_states)
+ if self.is_sparse:
+ hidden_states, router_states = self.ffn(hidden_states, attention_mask)
+ else:
+ hidden_states, router_states = self.ffn(hidden_states), None
+
+ hidden_states = self.ff_dropout(hidden_states)
+
+ hidden_states = residual + hidden_states
+
+ # clamp inf values to enable fp16 training
+ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states, present_key_value)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if output_router_logits:
+ outputs += (router_states,)
+
+ return outputs
+
+
+class NllbMoePreTrainedModel(PreTrainedModel):
+ config_class = NllbMoeConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["NllbMoeEncoderLayer", "NllbMoeDecoderLayer"]
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+NLLB_MOE_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+ heads, etc.).
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`NllbMoeConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+NLLB_MOE_GENERATION_EXAMPLE = r"""
+ Translation example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, NllbMoeForConditionalGeneration
+
+ >>> model = NllbMoeForConditionalGeneration.from_pretrained("facebook/nllb-moe-54b")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b")
+
+ >>> text_to_translate = "Life is like a box of chocolates"
+ >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt")
+
+ >>> # translate to French
+ >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("fra_Latn"))
+ >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True))
+ ```
+"""
+
+NLLB_MOE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
+ should not be returned during inference.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class NllbMoeEncoder(NllbMoePreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`NllbMoeEncoderLayer`].
+
+ Args:
+ config:
+ NllbMoeConfig
+ embed_tokens (nn.Embedding):
+ output embedding
+ """
+
+ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
+
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+
+ self.embed_positions = NllbMoeSinusoidalPositionalEmbedding(
+ config.max_position_embeddings,
+ embed_dim,
+ self.padding_idx,
+ )
+ sparse_step = config.encoder_sparse_step
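+ # Every `sparse_step`-th layer uses the sparse MoE FFN. For example (hypothetical values), with
+ # encoder_sparse_step=4 and 12 encoder layers, layers 4, 8 and 12 (1-indexed) are sparse and the
+ # rest are dense; a sparse_step of 0 makes every layer dense.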
+ self.layers = nn.ModuleList()
+ for i in range(config.encoder_layers):
+ is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False
+ self.layers.append(NllbMoeEncoderLayer(config, is_sparse))
+
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
+ and should not be returned during inference.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(input_ids, inputs_embeds)
+ embed_pos = embed_pos.to(inputs_embeds.device)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_router_probs = () if output_router_logits else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+ if self.training and (dropout_probability < self.layerdrop): # skip the layer
+ layer_outputs = (None, None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ output_router_logits=output_router_logits,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions += (layer_outputs[1],)
+
+ if output_router_logits:
+ all_router_probs += (layer_outputs[-1],)
+
+ last_hidden_state = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states += (last_hidden_state,)
+
+ if not return_dict:
+ return tuple(
+ v for v in [last_hidden_state, encoder_states, all_attentions, all_router_probs] if v is not None
+ )
+
+ return MoEModelOutput(
+ last_hidden_state=last_hidden_state,
+ hidden_states=encoder_states,
+ attentions=all_attentions,
+ router_probs=all_router_probs,
+ )
+
+
+class NllbMoeDecoder(NllbMoePreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`NllbMoeDecoderLayer`]
+
+ Args:
+ config:
+ NllbMoeConfig
+ embed_tokens (nn.Embedding):
+ output embedding
+ """
+
+ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
+
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+
+ self.embed_positions = NllbMoeSinusoidalPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ self.padding_idx,
+ )
+
+ sparse_step = config.decoder_sparse_step
+ self.layers = nn.ModuleList()
+ for i in range(config.decoder_layers):
+ is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False
+ self.layers.append(NllbMoeDecoderLayer(config, is_sparse))
+
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
+ and should not be returned during inference.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ # create causal mask
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ combined_attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length)
+ positions = positions.to(inputs_embeds.device)
+
+ hidden_states = inputs_embeds + positions
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting" " `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_router_probs = () if output_router_logits else None
+ all_cross_attentions = () if output_attentions else None
+ present_key_value_states = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+
+ skip_the_layer = True if self.training and (dropout_probability < self.layerdrop) else False
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ layer_head_mask = head_mask[idx] if head_mask is not None else None
+ cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ # under deepspeed zero3 all gpus must run in sync
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.forward,
+ hidden_states,
+ combined_attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ layer_head_mask,
+ cross_attn_layer_head_mask,
+ None, # past_key_value is always None with gradient checkpointing
+ use_cache,
+ output_attentions,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=combined_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=layer_head_mask,
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_router_logits=output_router_logits,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ continue
+
+ if use_cache:
+ present_key_value_states += (layer_outputs[1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[2],)
+ all_cross_attentions += (layer_outputs[3],)
+
+ if output_router_logits:
+ all_router_probs += (layer_outputs[-1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ present_key_value_states,
+ all_hidden_states,
+ all_self_attns,
+ all_cross_attentions,
+ all_router_probs,
+ ]
+ if v is not None
+ )
+ return MoEModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=present_key_value_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ router_probs=all_router_probs,
+ )
+
+
+@add_start_docstrings(
+ "The bare NllbMoe Model outputting raw hidden-states without any specific head on top.",
+ NLLB_MOE_START_DOCSTRING,
+)
+class NllbMoeModel(NllbMoePreTrainedModel):
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ def __init__(self, config: NllbMoeConfig):
+ super().__init__(config)
+
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
+
+ self.encoder = NllbMoeEncoder(config, self.shared)
+ self.decoder = NllbMoeDecoder(config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, value):
+ self.shared = value
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(NLLB_MOE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqMoEModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqMoEModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, NllbMoeModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts")
+ >>> model = NllbMoeModel.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+
+ >>> # preprocess: Prepend decoder_input_ids with the start token, which is the eos token for NllbMoeModel
+ >>> decoder_input_ids = model._shift_right(decoder_input_ids)
+
+ >>> # forward pass
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_router_logits=output_router_logits,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a MoEModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, MoEModelOutput):
+ encoder_outputs = MoEModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None,
+ )
+
+ # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_router_logits=output_router_logits,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqMoEModelOutput(
+ past_key_values=decoder_outputs.past_key_values,
+ cross_attentions=decoder_outputs.cross_attentions,
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ decoder_attentions=decoder_outputs.attentions,
+ encoder_router_logits=encoder_outputs.router_probs,
+ decoder_router_logits=decoder_outputs.router_probs,
+ )
+
+
+@add_start_docstrings(
+ "The NllbMoe Model with a language modeling head. Can be used for summarization.", NLLB_MOE_START_DOCSTRING
+)
+class NllbMoeForConditionalGeneration(NllbMoePreTrainedModel):
+ base_model_prefix = "model"
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config: NllbMoeConfig):
+ super().__init__(config)
+ self.model = NllbMoeModel(config)
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
+
+ self.router_z_loss_coef = config.router_z_loss_coef
+ self.router_aux_loss_coef = config.router_aux_loss_coef
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(NLLB_MOE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqMoEOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(NLLB_MOE_GENERATION_EXAMPLE)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqMoEOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+ """
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+ if labels is not None:
+ if decoder_input_ids is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_router_logits=output_router_logits,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0])
+
+ loss = None
+ encoder_aux_loss = None
+ decoder_aux_loss = None
+
+ if labels is not None:
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
+ # TODO: check in the config whether the router loss is enabled
+
+ if output_router_logits:
+ encoder_router_logits = outputs[-1]
+ decoder_router_logits = outputs[3 if output_attentions else 4]
+
+ # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder
+ encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_router_logits)
+ encoder_aux_loss = load_balancing_loss_func(encoder_router_logits, encoder_expert_indexes)
+
+ decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_router_logits)
+ decoder_aux_loss = load_balancing_loss_func(decoder_router_logits, decoder_expert_indexes)
+
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
+
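+ # The final training loss is the token-level cross-entropy plus `router_aux_loss_coef` times the sum of
+ # the encoder and decoder load-balancing losses; e.g. with a hypothetical coefficient of 0.001 the
+ # auxiliary term only gently pushes the routers toward a balanced expert load.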
+ if output_router_logits and labels is not None:
+ aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
+ loss = loss + aux_loss
+
+ output = (loss,) if loss is not None else ()
+ if not return_dict:
+ output += (lm_logits,)
+ if output_router_logits: # only return the auxiliary losses if router logits were requested
+ output += (
+ encoder_aux_loss,
+ decoder_aux_loss,
+ *outputs[1:],
+ )
+ else:
+ output += outputs[1:]
+
+ return output
+
+ return Seq2SeqMoEOutput(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ cross_attentions=outputs.cross_attentions,
+ encoder_aux_loss=encoder_aux_loss,
+ decoder_aux_loss=decoder_aux_loss,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ decoder_attentions=outputs.decoder_attentions,
+ encoder_router_logits=outputs.encoder_router_logits,
+ decoder_router_logits=outputs.decoder_router_logits,
+ )
+
+ def _unpack_router_logits(self, router_outputs):
+ total_router_logits = []
+ total_expert_indexes = []
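+ # `router_outputs` holds one (router_probs, expert_index) tuple per layer; dense layers
+ # contribute `None` and are skipped.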
+ for router_output in router_outputs:
+ if router_output is not None:
+ router_logits, expert_indexes = router_output
+ total_router_logits.append(router_logits)
+ total_expert_indexes.append(expert_indexes)
+
+ total_router_logits = torch.cat(total_router_logits, dim=1) if len(total_router_logits) > 0 else None
+ total_expert_indexes = torch.stack(total_expert_indexes, dim=1) if len(total_expert_indexes) > 0 else None
+ return total_router_logits, total_expert_indexes
+
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if decoder_input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
+
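+ # e.g. (hypothetical shapes) with past_length = 7 and decoder_input_ids of shape (batch, 8), only the
+ # most recent token is kept and the decoder receives input of shape (batch, 1)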
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
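+ # Beam search reorders hypotheses between steps, so the cached key/value states have to
+ # follow: gather every cached tensor along the batch dimension according to `beam_idx`.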
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..083301cc20c677fa15daa9cff63385f04fcd0507
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_prophetnet": ["PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig"],
+ "tokenization_prophetnet": ["ProphetNetTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_prophetnet"] = [
+ "PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ProphetNetDecoder",
+ "ProphetNetEncoder",
+ "ProphetNetForCausalLM",
+ "ProphetNetForConditionalGeneration",
+ "ProphetNetModel",
+ "ProphetNetPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig
+ from .tokenization_prophetnet import ProphetNetTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_prophetnet import (
+ PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ProphetNetDecoder,
+ ProphetNetEncoder,
+ ProphetNetForCausalLM,
+ ProphetNetForConditionalGeneration,
+ ProphetNetModel,
+ ProphetNetPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5578673f106f4e14622faea6fc513f0a8e1adda5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5ca9aa028973c941b24a58413072ddf52026825
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ea02b1ac6b6c5993327d1389fe388607cf8d406
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/modeling_prophetnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/modeling_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ce351ac6b85253ab56da82df6b35fdf7af4a381
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/modeling_prophetnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/tokenization_prophetnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/tokenization_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20028d5e35fda6be7bb1fca4bf4758182f65751f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/tokenization_prophetnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/configuration_prophetnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/configuration_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..e07936a14cd30245a28b7e1619d1b46ac0ca9f63
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/configuration_prophetnet.py
@@ -0,0 +1,180 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" ProphetNet model configuration"""
+
+from typing import Callable, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class ProphetNetConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ProphetNetModel`]. It is used to instantiate a
+ ProphetNet model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the ProphetNet
+ [microsoft/prophetnet-large-uncased](https://huggingface.co/microsoft/prophetnet-large-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for activations inside the fully connected layer.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the ProphetNet model. Defines the number of different tokens that can be represented by
+ the `input_ids` passed when calling [`ProphetNetModel`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ num_encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ num_encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the `intermediate` (often named feed-forward) layer in decoder.
+ num_decoder_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ num_decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ add_cross_attention (`bool`, *optional*, defaults to `True`):
+ Whether cross-attention layers should be added to the model.
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether this is an encoder/decoder model.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+ ngram (`int`, *optional*, defaults to 2):
+ Number of future tokens to predict. Set to 1 to behave like a traditional language model that only
+ predicts the next token.
+ num_buckets (`int`, *optional*, defaults to 32):
+ The number of buckets to use for each attention layer. This is for relative position calculation. See the
+ [T5 paper](https://arxiv.org/abs/1910.10683) for more details.
+ relative_max_distance (`int`, *optional*, defaults to 128):
+ Relative distances greater than this number will all be put into the same (last) bucket. This is for
+ relative position calculation. See the [T5 paper](https://arxiv.org/abs/1910.10683) for more details.
+ disable_ngram_loss (`bool`, *optional*, defaults to `False`):
+ Whether the model should be trained to predict only the next first token (i.e. disable the extra n-gram
+ losses).
+ eps (`float`, *optional*, defaults to 0.0):
+ Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label
+ smoothing is performed.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
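+
+ Example (an illustrative sketch; instantiating with defaults yields a microsoft/prophetnet-large-uncased
+ style configuration):
+
+ ```python
+ >>> from transformers import ProphetNetConfig, ProphetNetModel
+
+ >>> # Initializing a ProphetNet microsoft/prophetnet-large-uncased style configuration
+ >>> configuration = ProphetNetConfig()
+
+ >>> # Initializing a model (with random weights) from that configuration
+ >>> model = ProphetNetModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```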
+ """
+
+ model_type = "prophetnet"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "num_attention_heads": "num_encoder_attention_heads",
+ }
+
+ def __init__(
+ self,
+ activation_dropout: Optional[float] = 0.1,
+ activation_function: Optional[Union[str, Callable]] = "gelu",
+ vocab_size: Optional[int] = 30522,
+ hidden_size: Optional[int] = 1024,
+ encoder_ffn_dim: Optional[int] = 4096,
+ num_encoder_layers: Optional[int] = 12,
+ num_encoder_attention_heads: Optional[int] = 16,
+ decoder_ffn_dim: Optional[int] = 4096,
+ num_decoder_layers: Optional[int] = 12,
+ num_decoder_attention_heads: Optional[int] = 16,
+ attention_dropout: Optional[float] = 0.1,
+ dropout: Optional[float] = 0.1,
+ max_position_embeddings: Optional[int] = 512,
+ init_std: Optional[float] = 0.02,
+ is_encoder_decoder: Optional[bool] = True,
+ add_cross_attention: Optional[bool] = True,
+ decoder_start_token_id: Optional[int] = 0,
+ ngram: Optional[int] = 2,
+ num_buckets: Optional[int] = 32,
+ relative_max_distance: Optional[int] = 128,
+ disable_ngram_loss: Optional[bool] = False,
+ eps: Optional[float] = 0.0,
+ use_cache: Optional[bool] = True,
+ pad_token_id: Optional[int] = 0,
+ bos_token_id: Optional[int] = 1,
+ eos_token_id: Optional[int] = 2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.num_encoder_layers = num_encoder_layers
+ self.num_encoder_attention_heads = num_encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.num_decoder_layers = num_decoder_layers
+ self.num_decoder_attention_heads = num_decoder_attention_heads
+ self.max_position_embeddings = max_position_embeddings
+ self.init_std = init_std # Normal(0, this parameter)
+ self.activation_function = activation_function
+
+ # parameters for prophetnet
+ self.ngram = ngram
+ self.num_buckets = num_buckets
+ self.relative_max_distance = relative_max_distance
+ self.disable_ngram_loss = disable_ngram_loss
+ self.eps = eps
+
+ # 3 Types of Dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.dropout = dropout
+
+ self.use_cache = use_cache
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ add_cross_attention=add_cross_attention,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
+
+ @property
+ def num_hidden_layers(self) -> int:
+ return self.num_encoder_layers + self.num_decoder_layers
+
+ @num_hidden_layers.setter
+ def num_hidden_layers(self, value):
+ raise NotImplementedError(
+ "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
+ " `num_decoder_layers`."
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9e64c06ef769ad79fa43e46d6945c9d5f9f86e9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,160 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert ProphetNet checkpoint."""
+
+
+import argparse
+
+from torch import nn
+
+# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
+# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
+from transformers_old.modeling_prophetnet import (
+ ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
+)
+from transformers_old.modeling_xlm_prophetnet import (
+ XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
+)
+
+from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
+
+
+logger = logging.get_logger(__name__)
+logging.set_verbosity_info()
+
+
+def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
+ """
+ Copy/paste/tweak ProphetNet's weights to our ProphetNet structure.
+ """
+ if "xprophetnet" in prophetnet_checkpoint_path:
+ prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
+ prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
+ prophetnet_checkpoint_path, output_loading_info=True
+ )
+ else:
+ prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
+ prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
+ prophetnet_checkpoint_path, output_loading_info=True
+ )
+
+ special_keys = ["key_proj", "value_proj", "query_proj"]
+
+ mapping = {
+ "self_attn": "ngram_self_attn",
+ "cross_attn": "encoder_attn",
+ "cross_attn_layer_norm": "encoder_attn_layer_norm",
+ "feed_forward_layer_norm": "final_layer_norm",
+ "feed_forward": "",
+ "intermediate": "fc1",
+ "output": "fc2",
+ "key_proj": "k_proj",
+ "query_proj": "q_proj",
+ "value_proj": "v_proj",
+ "word_embeddings": "embed_tokens",
+ "embeddings_layer_norm": "emb_layer_norm",
+ "relative_pos_embeddings": "relative_linear",
+ "ngram_embeddings": "ngram_input_embed",
+ "position_embeddings": "embed_positions",
+ }
+
+ for key in loading_info["missing_keys"]:
+ attributes = key.split(".")
+
+ if attributes[0] == "lm_head":
+ model = prophet
+ old_model = prophet_old
+ else:
+ model = prophet.prophetnet
+ old_model = prophet_old.model
+
+ is_key_init = False
+ for attribute in attributes:
+ if attribute in mapping:
+ old_attribute = mapping[attribute]
+ if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
+ old_attribute = attribute
+ elif hasattr(old_model, attribute):
+ old_attribute = attribute
+
+ if attribute == "weight":
+ assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
+ model.weight = old_model.weight
+ logger.info(f"{attribute} is initialized.")
+ is_key_init = True
+ break
+ elif attribute == "bias":
+ assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
+ model.bias = old_model.bias
+ logger.info(f"{attribute} is initialized")
+ is_key_init = True
+ break
+ elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
+ embed_dim = old_model.in_proj_weight.shape[0] // 3
+ param = getattr(model, attribute)
+ assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
+ assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
+ if attribute == "query_proj":
+ model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
+ model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
+
+ elif attribute == "key_proj":
+ model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
+ model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
+ elif attribute == "value_proj":
+ model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
+ model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
+ is_key_init = True
+ break
+ elif attribute == "position_embeddings":
+ assert (
+ model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
+ ), "Hidden size has to match"
+ assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
+ model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
+ is_key_init = True
+ break
+
+ if attribute.isdigit():
+ model = model[int(attribute)]
+ old_model = old_model[int(old_attribute)]
+ else:
+ model = getattr(model, attribute)
+
+ if old_attribute == "":
+ old_model = old_model
+ else:
+ if not hasattr(old_model, old_attribute):
+ raise ValueError(f"{old_model} does not have {old_attribute}")
+ old_model = getattr(old_model, old_attribute)
+
+ if not is_key_init:
+ raise ValueError(f"{key} was not correctly initialized!")
+
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ prophet.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
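+
+ # Illustrative invocation (placeholder paths; the old-style checkpoints are referenced at the top of this file):
+ # python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
+ # --prophetnet_checkpoint_path <path_to_old_style_checkpoint> \
+ # --pytorch_dump_folder_path <output_folder>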
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/modeling_prophetnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/modeling_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7d9028cdaf7095a2477fe22d0ccfbd7cff561a1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/modeling_prophetnet.py
@@ -0,0 +1,2340 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch ProphetNet model, ported from ProphetNet repo(fairsequery_states version)."""
+
+import copy
+import math
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, nn
+from torch.nn import LayerNorm
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_prophetnet import ProphetNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "ProphenetConfig"
+_CHECKPOINT_FOR_DOC = "microsoft/prophetnet-large-uncased"
+
+
+from ..deprecated._archive_maps import PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+PROPHETNET_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ Original ProphetNet code can be found [here](https://github.com/microsoft/ProphetNet). Checkpoints were converted
+ from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the
+ file `convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py`.
+
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ProphetNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+PROPHETNET_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+PROPHETNET_STANDALONE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def softmax(hidden_state, dim, onnx_trace=False):
+ if onnx_trace:
+ return nn.functional.softmax(hidden_state.float(), dim=dim)
+ else:
+ return nn.functional.softmax(hidden_state, dim=dim, dtype=torch.float32)
+
+
+def ngram_attention_bias(sequence_length, ngram, device, dtype):
+ """
+ This function computes the bias for the predict stream
+ """
+ left_block = (
+ torch.ones((ngram, sequence_length, sequence_length), device=device, dtype=dtype) * torch.finfo(dtype).min
+ )
+ right_block = left_block.detach().clone()
+ # create bias
+ for stream_idx in range(ngram):
+ right_block[stream_idx].fill_diagonal_(0, wrap=False)
+ left_block[stream_idx].triu_(-stream_idx + 1)
+
+ left_block[:, :, 0] = 0
+ return torch.cat([left_block, right_block], dim=2)
+
+
+def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False):
+ """
+ This function computes individual parts of the relative position buckets. For more detail, see paper.
+ """
+ inv_relative_positions = -relative_positions
+ rel_positions_bucket = 0
+
+ if is_bidirectional:
+ num_buckets = num_buckets // 2
+ rel_positions_bucket = (
+ rel_positions_bucket
+ + torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets
+ )
+ inv_relative_positions = torch.abs(inv_relative_positions)
+ else:
+ inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions))
+
+ max_exact = num_buckets // 2
+ is_small = torch.lt(inv_relative_positions, max_exact)
+ val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log(
+ max_distance / max_exact
+ ) * (num_buckets - max_exact)
+ val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int()
+ rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large)
+ return rel_positions_bucket
+
+
+def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids):
+ """
+ This function computes both main and predict relative position buckets. For more detail, see paper.
+ """
+ # main stream
+ main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1)
+ main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1)
+
+ # predicting stream
+ predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1)
+ predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1)
+ predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1)
+
+ # get both position buckets
+ main_relative_position_buckets = compute_relative_buckets(
+ num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False
+ )
+ predict_relative_position_buckets = compute_relative_buckets(
+ num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False
+ )
+ return main_relative_position_buckets, predict_relative_position_buckets
+
+
+@dataclass
+class ProphetNetSeq2SeqLMOutput(ModelOutput):
+ """
+ Base class for sequence-to-sequence language models outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the main stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, encoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, encoder_sequence_length)`. Attentions weights of the encoder, after the attention
+ softmax, used to compute the weighted average in the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ logits_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+ @property
+ def decoder_cross_attentions(self):
+ warnings.warn(
+ "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
+ " instead.",
+ FutureWarning,
+ )
+ return self.cross_attentions
+
+
+@dataclass
+class ProphetNetSeq2SeqModelOutput(ModelOutput):
+ """
+ Base class for model encoder's outputs that also contain pre-computed hidden states that can speed up sequential
+ decoding.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
+ Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, encoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, encoder_sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ last_hidden_state_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+ @property
+ def decoder_cross_attentions(self):
+ warnings.warn(
+ "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
+ " instead.",
+ FutureWarning,
+ )
+ return self.cross_attentions
+
+
+@dataclass
+class ProphetNetDecoderModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
+ Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`):
+ Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ last_hidden_state_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class ProphetNetDecoderLMOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the main stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ logits_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class ProphetNetPreTrainedModel(PreTrainedModel):
+ config_class = ProphetNetConfig
+ base_model_prefix = "prophetnet"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.init_std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.init_std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.config.decoder_start_token_id
+ pad_token_id = self.config.pad_token_id
+
+ assert decoder_start_token_id is not None, (
+ "self.model.config.decoder_start_token_id has to be defined. In ProphetNet it is usually set to the"
+ " pad_token_id. See ProphetNet docs for more information"
+ )
+
+ # shift inputs to the right
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
+
+ return shifted_input_ids
+
+
+class ProphetNetPositionalEmbeddings(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting
+ based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to
+ the forward function.
+ """
+
+ def __init__(self, config: ProphetNetConfig) -> None:
+ self.max_length = config.max_position_embeddings
+ super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
+
+ def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None):
+ assert (position_ids is None) or (
+ self.padding_idx is None
+ ), "If position_ids is pre-computed then padding_idx should not be set."
+
+ if position_ids is None:
+ if past_key_values is not None:
+ # position_ids is the same for every token when decoding a single step
+ # Without the int() cast, it doesn't work in some cases when exporting to ONNX
+ prev_num_input_ids = past_key_values[0][0].shape[2]
+ num_input_ids = inputs_shape[1] + prev_num_input_ids
+ position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * (
+ int(self.padding_idx + num_input_ids)
+ )
+ else:
+ if attention_mask is None:
+ attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device)
+
+ # retrieve position_ids from input_ids / attention_mask
+ position_ids = (
+ torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
+ ).long() + self.padding_idx
+
+ # make sure position_ids are not bigger than max_length
+ position_ids = position_ids.clamp(0, self.max_length - 1)
+
+ return super().forward(position_ids), position_ids
+
+ def _forward(self, position_ids):
+ return super().forward(position_ids)
+
+
+class ProphetNetAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ config: ProphetNetConfig,
+ num_attn_heads: int,
+ ):
+ super().__init__()
+ hidden_size = config.hidden_size
+
+ self.attention_dropout = config.attention_dropout
+ self.dropout = config.dropout
+ self.num_attn_heads = num_attn_heads
+ self.head_dim = hidden_size // num_attn_heads
+
+ assert self.head_dim * num_attn_heads == hidden_size, (
+ "`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and"
+ " `config.num_decoder_attention_heads`"
+ )
+
+ self.key_proj = nn.Linear(hidden_size, hidden_size)
+ self.value_proj = nn.Linear(hidden_size, hidden_size)
+ self.query_proj = nn.Linear(hidden_size, hidden_size)
+
+ self.out_proj = nn.Linear(hidden_size, hidden_size)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
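+ # (batch_size, seq_len, hidden_size) -> (batch_size, num_attn_heads, seq_len, head_dim)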
+ return tensor.view(bsz, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states,
+ key_value_states: Optional[Tensor] = None,
+ attention_mask: Optional[Tensor] = None,
+ layer_head_mask: Optional[Tensor] = None,
+ past_key_value: Optional[Tuple[Tensor]] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[Tensor, Optional[Tensor]]:
+ batch_size, tgt_len, hidden_size = hidden_states.size()
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ assert list(hidden_states.size()) == [
+ batch_size,
+ tgt_len,
+ hidden_size,
+ ], f"Size of hidden states should be {batch_size, tgt_len, hidden_size}, but is {hidden_states.size()}"
+
+ # previous time steps are cached - no need to recompute key and value if they are static
+ query_states = self.query_proj(hidden_states) / (self.head_dim**0.5)
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.key_proj(key_value_states), -1, batch_size)
+ value_states = self._shape(self.value_proj(key_value_states), -1, batch_size)
+ else:
+ # self_attention
+ key_states = self._shape(self.key_proj(hidden_states), -1, batch_size)
+ value_states = self._shape(self.value_proj(hidden_states), -1, batch_size)
+
+ if is_cross_attention:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ # project states into the correct shape
+ proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, batch_size).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+ src_len = key_states.size(2)
+ attn_weights = torch.einsum("bsij,bsjk->bsik", query_states, key_states.transpose(2, 3))
+ expected_shape = (batch_size, self.num_attn_heads, tgt_len, src_len)
+ if attn_weights.size() != expected_shape:
+ raise ValueError(f"Attention weights should have size {expected_shape}, but is {attn_weights.size()}")
+
+ # This is part of a workaround to get around fork/join parallelism not supporting Optional types.
+ if attention_mask is not None and attention_mask.dim() == 0:
+ attention_mask = None
+
+ expected_shape = (batch_size, self.num_attn_heads, 1, src_len)
+ if attention_mask is not None and attention_mask.size() != expected_shape:
+ raise ValueError(f"Attention mask should have size {expected_shape}, but is {attention_mask.size()}")
+ if attention_mask is not None: # don't attend to padding symbols
+ attn_weights = attn_weights + attention_mask
+ if output_attentions:
+ attn_weights_reshaped = attn_weights
+ else:
+ attn_weights_reshaped = None
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
+ batch_size, self.num_attn_heads, tgt_len, src_len
+ )
+
+ # apply head_mask also on attn_weights_reshaped which is used for n-gram attention inside the model
+ attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped
+
+ attn_probs = nn.functional.dropout(
+ attn_weights,
+ p=self.attention_dropout,
+ training=self.training,
+ )
+ attn_output = torch.einsum("bsij,bsjk->bsik", attn_probs, value_states)
+ expected_shape = (batch_size, self.num_attn_heads, tgt_len, self.head_dim)
+ if attn_output.size() != expected_shape:
+ raise ValueError(f"`attn_output` should have shape {expected_shape}, but is of shape {attn_output.size()}")
+
+ attn_output = attn_output.transpose(1, 2).reshape(batch_size, tgt_len, hidden_size)
+ attn_output = self.out_proj(attn_output)
+
+ attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
+ return attn_output, attn_weights_reshaped, past_key_value
+
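+ # A minimal shape sketch for ProphetNetAttention above, assuming hidden_size = 16,
+ # num_attn_heads = 4 (head_dim = 4), batch_size = 2, tgt_len = 3 and src_len = 5:
+ #   query_states: (2, 4, 3, 4), key_states/value_states: (2, 4, 5, 4)
+ #   attn_weights = einsum("bsij,bsjk->bsik", q, k.transpose(2, 3)) -> (2, 4, 3, 5)
+ #   attn_output  = einsum("bsij,bsjk->bsik", probs, v)             -> (2, 4, 3, 4)
+ # which is then transposed and reshaped back to (2, 3, 16) before `out_proj`.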
+
+class ProphetNetFeedForward(nn.Module):
+ """
+ This is the residual two-layer feed-forward block based on the original Transformer implementation.
+ """
+
+ def __init__(self, config: ProphetNetConfig, ffn_dim: int):
+ super().__init__()
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.intermediate = nn.Linear(config.hidden_size, ffn_dim)
+ self.output = nn.Linear(ffn_dim, config.hidden_size)
+ self.activation_dropout = config.activation_dropout
+ self.dropout = config.dropout
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.output(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ return hidden_states
+
+
+class ProphetNetNgramSelfAttention(nn.Module):
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.num_buckets = config.num_buckets
+ self.relative_max_distance = config.relative_max_distance
+ self.num_attn_heads = config.num_decoder_attention_heads
+ self.dropout = config.dropout
+ self.attention_dropout = config.attention_dropout
+ self.head_dim = config.hidden_size // self.num_attn_heads
+ self.ngram = config.ngram
+
+ assert (
+ self.head_dim * self.num_attn_heads == config.hidden_size
+ ), "config.hidden_size must be divisible by num_attn_heads"
+ # key, value, query projection
+ self.key_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.value_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.query_proj = nn.Linear(config.hidden_size, config.hidden_size)
+
+ # out projection
+ self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
+
+ # rel position embeddings
+ self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads)
+
+ # for onnx runtime
+ self.onnx_trace = False
+
+ def _shape(self, tensor, seq_len, batch_size):
+ return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def prepare_for_onnx_export_(self):
+ self.onnx_trace = True
+
+ def forward(
+ self,
+ hidden_states,
+ past_key_value: Optional[Tuple[Tensor]] = None,
+ attention_mask=None,
+ layer_head_mask=None,
+ extended_predict_attention_mask=None,
+ main_relative_position_buckets=None,
+ predict_relative_position_buckets=None,
+ position_ids=None,
+ ):
+ batch_size, ngram_sequence_length, hidden_size = hidden_states.size()
+ assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], (
+ f"`hidden_states` should be of shape {batch_size, ngram_sequence_length, hidden_size}, but is of shape"
+ f" {hidden_states.shape}"
+ )
+
+ # project
+ query_states = self.query_proj(hidden_states)
+ key_states = self.key_proj(hidden_states)
+ value_states = self.value_proj(hidden_states)
+
+ # normalize
+ query_states = query_states / (self.head_dim**0.5)
+
+ # reshape
+ query_states = self._shape(query_states, ngram_sequence_length, batch_size)
+ key_states = self._shape(key_states, -1, batch_size)
+ value_states = self._shape(value_states, -1, batch_size)
+ proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
+
+ query_states = query_states.view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ # chunk into main stream and predict stream
+ hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1)
+ query_states_list = query_states.chunk(1 + self.ngram, dim=2)
+ key_states_list = key_states.chunk(1 + self.ngram, dim=2)
+ value_states_list = value_states.chunk(1 + self.ngram, dim=2)
+
+ main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:]
+ main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:]
+ main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:]
+ main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:]
+
+ # saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim)
+ if past_key_value is not None:
+ prev_main_key_states = past_key_value[0]
+ main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=2)
+ prev_main_value_states = past_key_value[1]
+ main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=2)
+
+ # Update cache
+ past_key_value = (main_key_states, main_value_states)
+
+ # get seq_length of main stream only
+ sequence_length = ngram_sequence_length // (1 + self.ngram)
+
+ # MAIN-STREAM
+ # main attn weights
+ # [batch_size, number_heads, sequence_length, head_dimension]
+ # x [batch_size, number_heads, head_dimension, sequence_length]
+ # -> [batch_size, number_heads, sequence_length, sequence_length]
+ main_attn_weights = torch.einsum("bntc,bncs->bnts", main_query_states, main_key_states.transpose(2, 3))
+
+ # retrieve relative position embeddings for each layer -> see paper for more details
+ main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(
+ main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets
+ )
+
+ main_attn_weights = main_attn_weights + main_relative_pos_embeddings
+
+ if attention_mask is not None:
+ main_attn_weights = main_attn_weights + attention_mask
+
+ main_attn_probs = softmax(
+ main_attn_weights,
+ dim=-1,
+ onnx_trace=self.onnx_trace,
+ ).type_as(main_attn_weights)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view(
+ batch_size, self.num_attn_heads, -1, sequence_length
+ )
+
+ main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training)
+ # project to attn_output
+ # [batch_size, number_heads, sequence_length, sequence_length]
+ # x [batch_size, number_heads, sequence_length, head_dimension]
+ # -> [batch_size, number_heads, sequence_length, head_dimension]
+ main_attn_output = torch.einsum("bntc,bncs->bnts", main_attn_probs, main_value_states)
+ # reshape so that num_heads dim is merged into last `head_dim` axis
+ main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size)
+ main_attn_output = self.out_proj(main_attn_output)
+
+ # PREDICT-STREAM
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ predict_query_states = torch.stack(predict_query_states_list, 1).view(
+ batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim
+ )
+
+ # [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for key in predict_key_states_list], 1)
+
+ # [batch_size, sequence_length, ngram, hidden_size]
+ predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2)
+
+ # [batch_size, number_heads, ngram, 2*sequence_length, head_dimension]
+ predict_value_states = torch.cat(
+ [torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2
+ )
+
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ # -> [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ predict_attn_weights = torch.einsum("bnhtc,bnhsc->bnhts", (predict_query_states, predict_key_states))
+
+ # retrieve relative position embeddings for each layer -> see paper for more details
+ # [batch_size, ngram, number_heads, sequence_length, predict_relative_pos_embeddings]
+ predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(
+ predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets
+ )
+
+ # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings
+
+ if extended_predict_attention_mask is not None:
+ # Permuting Predict attention mask to [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4)
+ extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype)
+ predict_attn_weights = predict_attn_weights + extended_predict_attention_mask
+
+ predict_attn_probs = softmax(
+ predict_attn_weights,
+ dim=-1,
+ onnx_trace=self.onnx_trace,
+ ).type_as(predict_attn_weights)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs
+
+ predict_attn_probs = nn.functional.dropout(
+ predict_attn_probs, p=self.attention_dropout, training=self.training
+ )
+ # project to attention output
+ # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ # -> [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ predict_attn_output = torch.einsum(
+ "bnhts,bnhsc->bnhtc", (predict_attn_probs, predict_value_states.transpose(1, 2))
+ )
+
+ # reshape so that num_heads dim is merged into last `head_dim` axis
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension] -> [batch_size, ngram, sequence_length, hidden_size]
+ predict_attn_output = predict_attn_output.transpose(2, 3)
+ predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size)
+ predict_attn_output = self.out_proj(predict_attn_output)
+
+ # concat to single attn output
+ # [batch_size, (1+ngram)*sequence_length, hidden_size]
+ attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size)
+ # reshape into better form for `config.output_attentions`
+ main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1)
+
+ attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
+
+ return attn_output, main_attn_probs, predict_attn_probs, past_key_value
+
+ def get_main_relative_pos_embeddings(
+ self, hidden_states, attn_weights, position_ids, main_relative_position_buckets
+ ):
+ # input hidden_states [batch_size, sequence_length, hidden_size]
+ # input attn_weights [batch_size, num_heads, sequence_length, sequence_length]
+ # input position_ids [batch_size, sequence_length] or [1,1]
+ batch_size, num_attn_heads, tgt_len, src_len = attn_weights.shape
+ attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len)
+ if main_relative_position_buckets is None:
+ batch_size, sequence_length = hidden_states.shape[:2]
+ relative_positions = (
+ torch.arange(1, attn_weights.shape[-1] + 1)
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(batch_size, sequence_length, 1)
+ .to(position_ids.device)
+ )
+ # [batch_size, sequence_length, sequence_length+1]
+ relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
+ main_relative_position_buckets = compute_relative_buckets(
+ self.num_buckets, self.relative_max_distance, relative_positions, False
+ )
+
+ # [batch_size, sequence_length, num_buckets * num_heads]
+ rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
+ rel_pos_embeddings = rel_pos_embeddings.view(
+ rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)
+ )
+ rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2)
+ # [batch_size, num_heads, sequence_length, num_buckets]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,))
+
+ main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1)
+ # [batch_size * num_heads * sequence_length, sequence_length]
+ main_relative_position_buckets = main_relative_position_buckets.view(
+ -1, main_relative_position_buckets.shape[-1]
+ )
+ main_relative_position_buckets = main_relative_position_buckets.long()
+ # [batch_size * num_heads * sequence_length, sequence_length]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))
+
+ main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets)
+ main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1)
+ return main_relative_pos_embeddings
+
+ def get_predict_relative_pos_embeddings(
+ self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets
+ ):
+ # input hidden_states [batch_size, sequence_length, ngram, hidden_size]
+ # input attn_weights [batch_size, ngram, num_heads, sequence_length, 2*sequence_length]
+ # input position_ids [batch_size, sequence_length] or [1,1]
+ # input predict_relative_position_buckets [batch_size, sequence_length, 2*sequence_length] or None
+ batch_size, sequence_length = hidden_states.shape[0:2]
+
+ if predict_relative_position_buckets is None:
+ key_sequence_length = attn_weights.shape[-1]
+ assert (
+ position_ids[0][0] == key_sequence_length - 1
+ ), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)"
+ relative_positions = (
+ torch.arange(0, key_sequence_length)
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(batch_size, sequence_length, 1)
+ .to(position_ids.device)
+ )
+
+ relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
+ predict_relative_position_buckets = compute_relative_buckets(
+ self.num_buckets, self.relative_max_distance, relative_positions, False
+ )
+
+ # [batch_size, ngram, sequence_length, hidden_size]
+ hidden_states = hidden_states.transpose(1, 2)
+ rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
+
+ # [batch_size, ngram, sequence_length, num_buckets, num_heads]
+ rel_pos_embeddings = rel_pos_embeddings.view(
+ hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)
+ )
+ rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3)
+ # [batch_size * ngram * sequence_length * num_heads, num_buckets]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets)
+ # [ngram, batch_size, num_heads * sequence_length, -1]
+ predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0)
+ predict_relative_position_buckets = predict_relative_position_buckets.repeat(
+ self.ngram, 1, self.num_attn_heads, 1
+ )
+ # [ngram * batch_size * num_heads * sequence_length, -1]
+ predict_relative_position_buckets = predict_relative_position_buckets.view(
+ -1, predict_relative_position_buckets.size(-1)
+ ).long()
+
+ predict_relative_pos_embeddings = torch.gather(
+ rel_pos_embeddings, dim=1, index=predict_relative_position_buckets
+ )
+
+ # [batch_size, ngram, num_heads, sequence_length, -1]
+ predict_relative_pos_embeddings = predict_relative_pos_embeddings.view(
+ batch_size, self.ngram, self.num_attn_heads, sequence_length, -1
+ )
+
+ return predict_relative_pos_embeddings
+
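+ # A short sketch of the stream layout consumed by ProphetNetNgramSelfAttention above,
+ # assuming ngram = 2 and a main-stream length of 4: the decoder passes hidden states of
+ # length (1 + ngram) * 4 = 12, laid out as
+ #   [ main stream (0..3) | 1st predict stream (0..3) | 2nd predict stream (0..3) ]
+ # so `chunk(1 + self.ngram, ...)` recovers one main stream and `ngram` predict streams
+ # of length 4 each; only the main-stream key/value states are kept in `past_key_value`.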
+
+class ProphetNetEncoderLayer(nn.Module):
+ """
+ Encoder block for ProphetNet
+ """
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__()
+ # 1st residual block
+ self.self_attn = ProphetNetAttention(config, config.num_encoder_attention_heads)
+ self.self_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 2nd residual block
+ self.feed_forward = ProphetNetFeedForward(config, config.encoder_ffn_dim)
+ self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ output_attentions: bool = False,
+ ):
+ # 1st residual block
+ attention_output, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.self_attn_layer_norm(attention_output + hidden_states)
+
+ # 2nd residual block
+ feed_forward_output = self.feed_forward(hidden_states)
+ hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class ProphetNetDecoderLayer(nn.Module):
+ """
+ Decoder block for ProphetNet
+ """
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__()
+ # 1st residual block
+ self.self_attn = ProphetNetNgramSelfAttention(config)
+ self.self_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 2nd residual block
+ if config.add_cross_attention:
+ self.cross_attn = ProphetNetAttention(config, config.num_decoder_attention_heads)
+ self.cross_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 3rd residual block
+ self.feed_forward = ProphetNetFeedForward(config, config.decoder_ffn_dim)
+ self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attn_mask=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ extended_predict_attention_mask=None,
+ main_relative_position_buckets=None,
+ predict_relative_position_buckets=None,
+ position_ids=None,
+ past_key_value=None,
+ use_cache: bool = True,
+ output_attentions: bool = False,
+ ):
+ # 1st residual block
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ ngram_attention_output, self_attn_weights, self_attn_weights_ngram, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ extended_predict_attention_mask=extended_predict_attention_mask,
+ main_relative_position_buckets=main_relative_position_buckets,
+ predict_relative_position_buckets=predict_relative_position_buckets,
+ position_ids=position_ids,
+ )
+ hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ # 2nd residual block
+ attention_output, cross_attn_weights, cross_attn_present_key_value = self.cross_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attn_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # 3rd residual block
+ feed_forward_output = self.feed_forward(hidden_states)
+ hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
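+ # A small sketch of the per-layer cache layout produced above when cross-attention is
+ # used: `present_key_value` is the 4-tuple
+ #   (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value)
+ # which is why the next decoding step slices `past_key_value[:2]` for the n-gram
+ # self-attention and `past_key_value[-2:]` for the cross-attention.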
+
+@add_start_docstrings(
+ "The standalone encoder part of the ProphetNetModel.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetEncoder(ProphetNetPreTrainedModel):
+ r"""
+ word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
+ The word embedding parameters. This can be used to initialize [`ProphetNetEncoder`] with pre-defined word
+ embeddings instead of randomly initialized word embeddings.
+ """
+
+ def __init__(self, config: ProphetNetConfig, word_embeddings: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.word_embeddings = (
+ word_embeddings
+ if word_embeddings is not None
+ else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ )
+ self.position_embeddings = ProphetNetPositionalEmbeddings(config)
+ self.embeddings_layer_norm = LayerNorm(config.hidden_size)
+
+ self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetEncoder
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetEncoder.from_pretrained("patrickvonplaten/prophetnet-large-uncased-standalone")
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Either input_ids or inputs_embeds has to be passed.")
+ elif input_ids is not None and inputs_embeds is not None:
+ raise ValueError("Make sure to only pass input_ids or inputs_embeds.")
+ elif input_ids is not None and inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ # prepare attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (
+ 1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1)
+ ) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype)
+ else:
+ extended_attention_mask = None
+
+ position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device)
+
+ hidden_states = inputs_embeds + position_embeddings
+ hidden_states = self.embeddings_layer_norm(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training)
+
+ encoder_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ assert head_mask.size()[0] == (
+ len(self.layers)
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_hidden_states = encoder_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ extended_attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_hidden_states = encoder_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions
+ )
+
+
+@add_start_docstrings(
+ "The standalone decoder part of the ProphetNetModel.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetDecoder(ProphetNetPreTrainedModel):
+ r"""
+ word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
+ The word embedding parameters. This can be used to initialize [`ProphetNetDecoder`] with pre-defined word
+ embeddings instead of randomly initialized word embeddings.
+ """
+
+ def __init__(self, config: ProphetNetConfig, word_embeddings: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.ngram = config.ngram
+ self.num_buckets = config.num_buckets
+ self.relative_max_distance = config.relative_max_distance
+ self.dropout = config.dropout
+ self.max_target_positions = config.max_position_embeddings
+
+ self.word_embeddings = (
+ word_embeddings
+ if word_embeddings is not None
+ else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ )
+ self.position_embeddings = ProphetNetPositionalEmbeddings(config)
+
+ self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None)
+ self.layers = nn.ModuleList([ProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)])
+ self.embeddings_layer_norm = LayerNorm(config.hidden_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ProphetNetDecoderModelOutput]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetDecoder
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetDecoder.from_pretrained("microsoft/prophetnet-large-uncased", add_cross_attention=False)
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.")
+ elif input_ids is not None and inputs_embeds is not None:
+ raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.")
+ elif input_ids is not None and inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ main_stream_pos_embed, position_ids = self.position_embeddings(
+ (batch_size, sequence_length),
+ device=inputs_embeds.device,
+ past_key_values=past_key_values,
+ )
+
+ if past_key_values is not None:
+ main_relative_position_buckets, predict_relative_position_buckets = None, None
+ else:
+ (
+ main_relative_position_buckets,
+ predict_relative_position_buckets,
+ ) = self.compute_buffered_relative_buckets(position_ids)
+ predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1)
+
+ # add position embeddings
+ hidden_states = inputs_embeds + main_stream_pos_embed
+
+ ngram_embeddings = self.ngram_embeddings.weight
+
+ # prepare attention mask
+ if past_key_values is not None:
+ assert (
+ hidden_states.size(1) == 1
+ ), "At the moment `use_cache` is only supported for `decoder_input_ids` of length 1"
+
+ ngram_hidden_states = [
+ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1)
+ for ngram in range(self.ngram)
+ ]
+ extended_attention_mask = None
+ extended_predict_attention_mask = None
+ else:
+ ngram_hidden_states = [
+ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed) for ngram in range(self.ngram)
+ ]
+ extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask)
+ extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask)
+
+ # prepare encoder attention mask
+ if encoder_attention_mask is not None:
+ extended_encoder_attention_mask = (
+ 1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1)
+ ) * torch.finfo(self.dtype).min
+ extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype)
+ else:
+ extended_encoder_attention_mask = None
+
+ hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1)
+
+ if self.embeddings_layer_norm:
+ hidden_states = self.embeddings_layer_norm(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # init attentions, hidden_states and cache with empty tuples
+ all_main_stream_hidden_states = () if output_hidden_states else None
+ all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None
+
+ all_main_stream_attns = () if output_attentions else None
+ all_ngram_stream_attns = () if output_attentions else None
+ all_cross_attns = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ present_key_values = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ assert attn_mask.size()[0] == (len(self.layers)), (
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ # grad cannot be kept because tensor is sliced
+ all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
+ if self.config.ngram > 0:
+ all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ extended_attention_mask,
+ encoder_hidden_states,
+ extended_encoder_attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ (cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
+ extended_predict_attention_mask,
+ main_relative_position_buckets,
+ predict_relative_position_buckets,
+ position_ids,
+ None,
+ use_cache,
+ output_attentions,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attn_mask=extended_encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ extended_predict_attention_mask=extended_predict_attention_mask,
+ main_relative_position_buckets=main_relative_position_buckets,
+ predict_relative_position_buckets=predict_relative_position_buckets,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ present_key_values += (layer_outputs[4 if output_attentions else 1],)
+
+ if output_attentions:
+ all_main_stream_attns += (layer_outputs[1],)
+ all_ngram_stream_attns += (layer_outputs[2],)
+
+ if self.config.add_cross_attention:
+ all_cross_attns += (layer_outputs[3],)
+
+ if output_hidden_states:
+ all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
+ if self.config.ngram > 0:
+ all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
+
+ # split last_hidden_state for return
+ last_hidden_state = hidden_states[:, :sequence_length]
+ last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ last_hidden_state,
+ last_hidden_state_ngram,
+ present_key_values,
+ all_main_stream_hidden_states,
+ all_ngram_stream_hidden_states,
+ all_main_stream_attns,
+ all_ngram_stream_attns,
+ all_cross_attns,
+ ]
+ if v is not None
+ )
+ return ProphetNetDecoderModelOutput(
+ last_hidden_state=last_hidden_state,
+ last_hidden_state_ngram=last_hidden_state_ngram,
+ past_key_values=present_key_values,
+ hidden_states=all_main_stream_hidden_states,
+ hidden_states_ngram=all_ngram_stream_hidden_states,
+ attentions=all_main_stream_attns,
+ ngram_attentions=all_ngram_stream_attns,
+ cross_attentions=all_cross_attns,
+ )
+
+ def compute_buffered_relative_buckets(self, position_ids):
+ batch_size, sequence_length = position_ids.shape
+
+ position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1)
+ main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(
+ self.num_buckets, self.relative_max_distance, position_ids
+ )
+
+ # buffer relative buckets
+ main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1)
+ predict_relative_buckets = torch.cat(
+ [
+ predict_relative_buckets[:, :sequence_length, :sequence_length],
+ predict_relative_buckets[
+ :, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length
+ ],
+ ],
+ 2,
+ ).repeat(batch_size, 1, 1)
+
+ return main_relative_buckets, predict_relative_buckets
+
+ def prepare_attention_mask(self, hidden_states, attention_mask):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # get causal mask
+ causal_mask = torch.full(
+ (seq_length, seq_length),
+ torch.finfo(hidden_states.dtype).min,
+ dtype=hidden_states.dtype,
+ device=hidden_states.device,
+ )
+ causal_mask = torch.triu(causal_mask, 1)
+
+ extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand(
+ (batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape
+ )
+
+ # add usual attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_causal_mask + extended_attention_mask
+ else:
+ extended_attention_mask = extended_causal_mask
+ return extended_attention_mask.to(hidden_states.dtype)
+
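+ # A minimal sketch of the additive causal mask built by `prepare_attention_mask` above,
+ # assuming seq_length = 3 and writing `min` for `torch.finfo(dtype).min`:
+ #   [[ 0., min, min],
+ #    [ 0.,  0., min],
+ #    [ 0.,  0.,  0.]]
+ # i.e. `torch.triu(causal_mask, 1)` keeps the large negative values strictly above the
+ # diagonal, so once added to the scores each position only attends to itself and to
+ # earlier positions.
+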
+ def prepare_predict_attention_mask(self, hidden_states, attention_mask):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # get causal mask
+ predict_causal_mask = ngram_attention_bias(
+ self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype
+ )
+ predict_causal_mask = torch.cat(
+ [
+ predict_causal_mask[:, :seq_length, :seq_length],
+ predict_causal_mask[
+ :, :seq_length, self.max_target_positions : self.max_target_positions + seq_length
+ ],
+ ],
+ dim=-1,
+ )
+ extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand(
+ (batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape
+ )
+
+ # add usual attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_attention_mask.expand(
+ (batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length)
+ )
+ # predicted stream attention_mask should always be 0
+ extended_attention_mask = torch.cat(
+ [extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1
+ )
+ extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask
+ else:
+ extended_predict_attention_mask = extended_predict_causal_mask
+ return extended_predict_attention_mask.to(hidden_states.dtype)
+
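+ # Under the same illustrative assumptions (seq_length = 3), the predict-stream mask
+ # built by `prepare_predict_attention_mask` is twice as wide along the key axis
+ # (2 * seq_length = 6): the first half lets the predict streams attend to the main
+ # stream, the second half to themselves, and any padding mask is extended with zeros
+ # for the predict half before being added on top.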
+
+@add_start_docstrings(
+ "The bare ProphetNet Model outputting raw hidden-states without any specific head on top.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetModel(ProphetNetPreTrainedModel):
+ _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight"]
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__(config)
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_encoder_decoder = False
+ encoder_config.use_cache = False
+ self.encoder = ProphetNetEncoder(encoder_config, self.word_embeddings)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ self.decoder = ProphetNetDecoder(decoder_config, self.word_embeddings)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+ self.encoder.word_embeddings = self.word_embeddings
+ self.decoder.word_embeddings = self.word_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings)
+ self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ProphetNetSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ProphetNetSeq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetModel.from_pretrained("microsoft/prophetnet-large-uncased")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states
+ >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ # decoder outputs consist of (dec_features, past_key_values, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+ return ProphetNetSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram,
+ decoder_attentions=decoder_outputs.attentions,
+ decoder_ngram_attentions=decoder_outputs.ngram_attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The ProphetNet Model with a language modeling head. Can be used for sequence generation tasks.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel):
+ _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight", "lm_head.weight"]
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__(config)
+ self.prophetnet = ProphetNetModel(config)
+ self.padding_idx = config.pad_token_id
+ self.disable_ngram_loss = config.disable_ngram_loss
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head)
+
+ def get_input_embeddings(self):
+ return self.prophetnet.word_embeddings
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ProphetNetSeq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
+ labels in `[0, ..., config.vocab_size - 1]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> logits_next_token = outputs.logits # logits to predict next token as usual
+ >>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
+ # get decoder inputs from shifting lm labels to the right
+ decoder_input_ids = self._shift_right(labels)
+
+ outputs = self.prophetnet(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ batch_size, sequence_length = (
+ decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2]
+ )
+
+ predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
+ predict_logits = self.lm_head(predicting_streams)
+
+ logits = predict_logits[:, 0]
+ logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
+
+ # To use .view in loss computation, make sure that logits is contiguous.
+ if not logits.is_contiguous():
+ logits = logits.contiguous()
+
+ loss = None
+ if labels is not None:
+ loss = self._compute_loss(predict_logits, labels)
+
+ if not return_dict:
+ all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
+ return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
+ else:
+ return ProphetNetSeq2SeqLMOutput(
+ loss=loss,
+ logits=logits,
+ logits_ngram=logits_ngram,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ decoder_ngram_attentions=outputs.decoder_ngram_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def _compute_loss(self, logits, labels, ignore_index=-100):
+ expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
+
+ for i in range(self.config.ngram):
+ if i > 0 and self.disable_ngram_loss:
+ break
+ expend_targets[i, :, :] = labels
+
+ logits = logits.transpose(0, 1).contiguous()
+ lprobs = nn.functional.log_softmax(
+ logits.view(-1, logits.size(-1)),
+ dim=-1,
+ dtype=torch.float32,
+ )
+
+ loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
+
+ if self.config.eps > 0.0:
+ smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
+ non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
+ smooth_loss = smooth_loss[non_masked_tokens]
+ smooth_loss = smooth_loss.mean()
+
+ eps_i = self.config.eps / lprobs.size(-1)
+ loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
+
+ return loss
+
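+ # A brief sketch of the label-smoothed loss in `_compute_loss` above, assuming
+ # `config.eps = 0.1` and vocabulary size V: with `nll` the usual negative
+ # log-likelihood and `smooth` the mean of -sum(log_probs) over non-masked tokens,
+ #   loss = (1 - 0.1) * nll + (0.1 / V) * smooth
+ # which is standard label smoothing; with `eps = 0` it reduces to plain NLL over the
+ # main and n-gram prediction streams.
+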
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ assert encoder_outputs is not None, "`encoder_outputs` have to be passed for generation."
+
+ if past_key_values:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+ # first step, decoder_cached_states are empty
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache,
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return self._shift_right(labels)
+
+ @staticmethod
+ # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+ + layer_past[2:],
+ )
+ return reordered_past
+
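+ # A tiny sketch of `_reorder_cache` above, assuming two beams that both continue from
+ # beam 0 (`beam_idx = [0, 0]`): each cached self-attention tensor of shape
+ # (num_beams, num_heads, seq_len, head_dim) is gathered along its beam axis with
+ # `index_select(0, beam_idx)`, while the cross-attention entries (positions 3 and 4 of
+ # each layer tuple) are passed through unchanged.
+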
+ def get_encoder(self):
+ return self.prophetnet.encoder
+
+ def get_decoder(self):
+ return self.prophetnet.decoder
+
+
+@add_start_docstrings(
+ "The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal"
+ " language modeling.",
+ PROPHETNET_START_DOCSTRING,
+)
+class ProphetNetForCausalLM(ProphetNetPreTrainedModel):
+ _tied_weights_keys = [
+ "prophetnet.word_embeddings.weight",
+ "prophetnet.decoder.word_embeddings.weight",
+ "lm_head.weight",
+ ]
+
+ def __init__(self, config: ProphetNetConfig):
+ # set config for CLM
+ config = copy.deepcopy(config)
+ config.is_decoder = True
+ config.is_encoder_decoder = False
+ super().__init__(config)
+ self.prophetnet = ProphetNetDecoderWrapper(config)
+
+ self.padding_idx = config.pad_token_id
+ self.disable_ngram_loss = config.disable_ngram_loss
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.prophetnet.decoder.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.prophetnet.decoder.word_embeddings = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head)
+
+ def set_decoder(self, decoder):
+ self.prophetnet.decoder = decoder
+
+ def get_decoder(self):
+ return self.prophetnet.decoder
+
+ @add_start_docstrings_to_model_forward(PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ProphetNetDecoderLMOutput]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ProphetNetForCausalLM
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = ProphetNetForCausalLM.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+
+ >>> # Model can also be used with EncoderDecoder framework
+ >>> from transformers import BertTokenizer, EncoderDecoderModel, AutoTokenizer
+ >>> import torch
+
+ >>> tokenizer_enc = BertTokenizer.from_pretrained("google-bert/bert-large-uncased")
+ >>> tokenizer_dec = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
+ ... "google-bert/bert-large-uncased", "microsoft/prophetnet-large-uncased"
+ ... )
+
+ >>> ARTICLE = (
+ ... "the us state department said wednesday it had received no "
+ ... "formal word from bolivia that it was expelling the us ambassador there "
+ ... "but said the charges made against him are `` baseless ."
+ ... )
+ >>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids
+ >>> labels = tokenizer_dec(
+ ... "us rejects charges against its ambassador in bolivia", return_tensors="pt"
+ ... ).input_ids
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:])
+
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
+ outputs = self.prophetnet.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2]
+
+ predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
+ predict_logits = self.lm_head(predicting_streams)
+
+ logits = predict_logits[:, 0]
+ logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
+
+ loss = None
+ if labels is not None:
+ loss = self._compute_loss(predict_logits, labels)
+
+ if not return_dict:
+ all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
+ return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
+ else:
+ return ProphetNetDecoderLMOutput(
+ loss=loss,
+ logits=logits,
+ logits_ngram=logits_ngram,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ hidden_states_ngram=outputs.hidden_states_ngram,
+ attentions=outputs.attentions,
+ ngram_attentions=outputs.ngram_attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+    def _compute_loss(self, logits, labels, ignore_index=-100):
+        # Expand the gold labels to every predict stream: stream 0 is the regular
+        # next-token target, streams 1..ngram-1 are the further-ahead n-gram targets.
+        expanded_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
+
+        for i in range(self.config.ngram):
+            if i > 0 and self.disable_ngram_loss:
+                break
+            expanded_targets[i, :, :] = labels
+
+        logits = logits.transpose(0, 1).contiguous()
+        lprobs = nn.functional.log_softmax(
+            logits.view(-1, logits.size(-1)),
+            dim=-1,
+            dtype=torch.float32,
+        )
+
+        loss = nn.functional.nll_loss(lprobs, expanded_targets.view(-1), reduction="mean")
+
+        if self.config.eps > 0.0:
+            # Label smoothing: mix the negative log-likelihood with a uniform
+            # distribution over the vocabulary, weighted by `config.eps`.
+            smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
+            non_masked_tokens = expanded_targets.ne(ignore_index).view(-1)
+            smooth_loss = smooth_loss[non_masked_tokens]
+            smooth_loss = smooth_loss.mean()
+
+            eps_i = self.config.eps / lprobs.size(-1)
+            loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
+
+        return loss
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ use_cache=None,
+ **kwargs,
+ ):
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_ids.shape)
+
+ if past_key_values:
+ input_ids = input_ids[:, -1:]
+ # first step, decoder_cached_states are empty
+ return {
+            "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ # Copied from transformers.models.bart.modeling_bart.BartForCausalLM._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+class ProphetNetDecoderWrapper(ProphetNetPreTrainedModel):
+ """
+ This is a wrapper class, so that [`ProphetNetForCausalLM`] can correctly be loaded from pretrained prophetnet
+ classes.
+ """
+
+ def __init__(self, config: ProphetNetConfig):
+ super().__init__(config)
+
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.decoder = ProphetNetDecoder(config, word_embeddings=self.word_embeddings)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _tie_weights(self):
+ self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings())
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
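+
+
+# Editorial illustration (not part of the original source): because `ProphetNetForCausalLM`
+# stores its decoder under the `prophetnet` attribute via the wrapper above, decoder weights
+# saved as part of a full ProphetNet checkpoint can be loaded into the standalone
+# decoder-only model, e.g. (sketch):
+#
+#     from transformers import ProphetNetForCausalLM
+#
+#     model = ProphetNetForCausalLM.from_pretrained("microsoft/prophetnet-large-uncased")
+#     assert model.config.is_decoder and not model.config.is_encoder_decoder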
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/tokenization_prophetnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/tokenization_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd387520af18efcce7e49fe3450485fd56a0e204
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/prophetnet/tokenization_prophetnet.py
@@ -0,0 +1,499 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import os
+import unicodedata
+from typing import Iterable, List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+            value for `do_lower_case` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+            never_split (`List[str]`, *optional*):
+                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
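+
+    # Editorial illustration (not part of the original source): with the defaults above,
+    # `BasicTokenizer().tokenize("Héllo, World!")` yields `["hello", ",", "world", "!"]`
+    # -- the text is lower-cased, accents are stripped and punctuation is split off into
+    # separate tokens.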
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
+
+
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
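+
+# Editorial note (not part of the original source): the vocabulary file is expected to
+# contain one token per line, and the zero-based line number becomes the token id.
+# For example, a file holding the three lines "[PAD]", "[UNK]", "[SEP]" loads as
+# {"[PAD]": 0, "[UNK]": 1, "[SEP]": 2}.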
+
+
+class ProphetNetTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a ProphetNetTokenizer. Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ x_sep_token (`str`, *optional*, defaults to `"[X_SEP]"`):
+ Special second separator token, which can be generated by [`ProphetNetForConditionalGeneration`]. It is
+            used to separate bullet-point-like sentences in summarization.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+            value for `do_lower_case` (as in the original BERT).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ # first name has to correspond to main model input name
+ # to make sure `tokenizer.pad(...)` works correctly
+ # `ProphetNet` doesn't have `token_type_ids` as argument.
+ model_input_names: List[str] = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file: str,
+ do_lower_case: Optional[bool] = True,
+ do_basic_tokenize: Optional[bool] = True,
+ never_split: Optional[Iterable] = None,
+ unk_token: Optional[str] = "[UNK]",
+ sep_token: Optional[str] = "[SEP]",
+ x_sep_token: Optional[str] = "[X_SEP]",
+ pad_token: Optional[str] = "[PAD]",
+ mask_token: Optional[str] = "[MASK]",
+ tokenize_chinese_chars: Optional[bool] = True,
+ strip_accents: Optional[bool] = None,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a pretrained"
+                " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ x_sep_token=x_sep_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return len(self.vocab)
+
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ def _tokenize(self, text):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ def _convert_token_to_id(self, token: str):
+        """Converts a token (str) to an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ def _convert_id_to_token(self, index: int):
+        """Converts an index (integer) to a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens: str):
+        """Converts a sequence of tokens (string) into a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ def get_special_tokens_mask(
+ self,
+ token_ids_0: List[int],
+ token_ids_1: Optional[List[int]] = None,
+ already_has_special_tokens: Optional[bool] = False,
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return ([0] * len(token_ids_0)) + [1]
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
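+
+    # Editorial illustration (not part of the original source): for a single sequence,
+    # `get_special_tokens_mask([5, 6, 7])` returns `[0, 0, 0, 1]`, marking only the final
+    # `[SEP]` that `build_inputs_with_special_tokens` appends; for a pair, a `1` follows
+    # each of the two segments.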
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ProphetNet
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0]
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
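+
+    # Editorial illustration (not part of the original source):
+    # `create_token_type_ids_from_sequences([5, 6], [7])` returns `[0, 0, 0, 1, 1]`:
+    # three `0`s for the first segment plus its `[SEP]`, two `1`s for the second segment
+    # plus its `[SEP]`.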
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+        and adding special tokens. A ProphetNet sequence has the following format:
+
+        - single sequence: `X [SEP]`
+        - pair of sequences: `A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return token_ids_0 + [self.sep_token_id]
+ sep = [self.sep_token_id]
+ return token_ids_0 + sep + token_ids_1 + sep
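+
+    # Editorial illustration (not part of the original source): with `sep_token_id = S`,
+    # `build_inputs_with_special_tokens([5, 6])` returns `[5, 6, S]` and
+    # `build_inputs_with_special_tokens([5, 6], [7, 8])` returns `[5, 6, S, 7, 8, S]`;
+    # unlike BERT, no `[CLS]` token is prepended.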
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2d99e362be5fff7ed3b01d73f5faedcff7a8d12
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/convert_roberta_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/convert_roberta_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32269777c8f2ba6ec45deda949ffd70c152931b1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/convert_roberta_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_flax_roberta.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_flax_roberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e8a88d30fd09d4809aaacc2964d84a02cd04a41
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_flax_roberta.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_roberta.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_roberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..279807d9e6702cd086133b7fca481de56d0b075e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_roberta.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_tf_roberta.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_tf_roberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad095f640c97c03318f2f0dbaba3adc8a10b0b37
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_tf_roberta.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..414db82f485a6579906fc4ccfc07b7c8c2912b94
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff14e5b987a789c86f3ca37e11d79afe540a177e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__init__.py
@@ -0,0 +1,78 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"],
+}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_xlm_prophetnet"] = ["XLMProphetNetTokenizer"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_xlm_prophetnet"] = [
+ "XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "XLMProphetNetDecoder",
+ "XLMProphetNetEncoder",
+ "XLMProphetNetForCausalLM",
+ "XLMProphetNetForConditionalGeneration",
+ "XLMProphetNetModel",
+ "XLMProphetNetPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_xlm_prophetnet import (
+ XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ XLMProphetNetDecoder,
+ XLMProphetNetEncoder,
+ XLMProphetNetForCausalLM,
+ XLMProphetNetForConditionalGeneration,
+ XLMProphetNetModel,
+ XLMProphetNetPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55963d20a6f5af4f2616db3108e27d16baa90042
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..793d9566356bf3e9ba56946c23eb682076ae16b7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1a903c227bf596c9bebaa610251b9ef5c68aa44
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py
@@ -0,0 +1,182 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" XLM-ProphetNet model configuration"""
+
+
+from typing import Callable, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class XLMProphetNetConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of an [`XLMProphetNetModel`]. It is used to instantiate an
+ XLMProphetNet model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the XLMProphetNet
+ [microsoft/xprophetnet-large-wiki100-cased](https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for activations inside the fully connected layer.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ vocab_size (`int`, *optional*, defaults to 30522):
+            Vocabulary size of the XLMProphetNet model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`XLMProphetNetModel`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
+ num_encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ num_encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the `intermediate` (often named feed-forward) layer in decoder.
+ num_decoder_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ num_decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ add_cross_attention (`bool`, *optional*, defaults to `True`):
+ Whether cross-attention layers should be added to the model.
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether this is an encoder/decoder model.
+        pad_token_id (`int`, *optional*, defaults to 0):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            End of stream token id.
+        ngram (`int`, *optional*, defaults to 2):
+            Number of future tokens to predict. Set to 1 to behave like a traditional language model that only
+            predicts the next token.
+        num_buckets (`int`, *optional*, defaults to 32):
+            The number of buckets to use for each attention layer. This is for relative position calculation. See the
+            [T5 paper](https://arxiv.org/abs/1910.10683) for more details.
+        relative_max_distance (`int`, *optional*, defaults to 128):
+            Relative distances greater than this number will be put into the last bucket. This is for relative
+            position calculation. See the [T5 paper](https://arxiv.org/abs/1910.10683) for more details.
+        disable_ngram_loss (`bool`, *optional*, defaults to `False`):
+            Whether to train the model to predict only the next token (disabling the n-gram losses).
+ eps (`float`, *optional*, defaults to 0.0):
+ Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label
+ smoothing is performed.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ """
+
+ model_type = "xlm-prophetnet"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "num_attention_heads": "num_encoder_attention_heads",
+ }
+
+ def __init__(
+ self,
+ activation_dropout: Optional[float] = 0.1,
+ activation_function: Optional[Union[str, Callable]] = "gelu",
+ vocab_size: Optional[int] = 30522,
+ hidden_size: Optional[int] = 1024,
+ encoder_ffn_dim: Optional[int] = 4096,
+ num_encoder_layers: Optional[int] = 12,
+ num_encoder_attention_heads: Optional[int] = 16,
+ decoder_ffn_dim: Optional[int] = 4096,
+ num_decoder_layers: Optional[int] = 12,
+ num_decoder_attention_heads: Optional[int] = 16,
+ attention_dropout: Optional[float] = 0.1,
+ dropout: Optional[float] = 0.1,
+ max_position_embeddings: Optional[int] = 512,
+ init_std: Optional[float] = 0.02,
+ is_encoder_decoder: Optional[bool] = True,
+ add_cross_attention: Optional[bool] = True,
+ decoder_start_token_id: Optional[int] = 0,
+ ngram: Optional[int] = 2,
+ num_buckets: Optional[int] = 32,
+ relative_max_distance: Optional[int] = 128,
+ disable_ngram_loss: Optional[bool] = False,
+ eps: Optional[float] = 0.0,
+ use_cache: Optional[bool] = True,
+ pad_token_id: Optional[int] = 0,
+ bos_token_id: Optional[int] = 1,
+ eos_token_id: Optional[int] = 2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.num_encoder_layers = num_encoder_layers
+ self.num_encoder_attention_heads = num_encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.num_decoder_layers = num_decoder_layers
+ self.num_decoder_attention_heads = num_decoder_attention_heads
+ self.max_position_embeddings = max_position_embeddings
+ self.init_std = init_std # Normal(0, this parameter)
+ self.activation_function = activation_function
+
+ # parameters for xlmprophetnet
+ self.ngram = ngram
+ self.num_buckets = num_buckets
+ self.relative_max_distance = relative_max_distance
+ self.disable_ngram_loss = disable_ngram_loss
+ self.eps = eps
+
+ # 3 Types of Dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.dropout = dropout
+
+ self.use_cache = use_cache
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ add_cross_attention=add_cross_attention,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
+
+ @property
+ def num_hidden_layers(self) -> int:
+ return self.num_encoder_layers + self.num_decoder_layers
+
+ @num_hidden_layers.setter
+ def num_hidden_layers(self, value):
+ raise NotImplementedError(
+ "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
+ " `num_decoder_layers`."
+ )
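+
+
+# Editorial illustration (not part of the original source): the encoder and decoder depths
+# are configured separately, so `num_hidden_layers` is a derived, read-only property, e.g.:
+#
+#     config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
+#     assert config.num_hidden_layers == 12  # assigning to it raises NotImplementedError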
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..53b8a1fc20cbb595f8e038e8464acde34132d771
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py
@@ -0,0 +1,2366 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch XLM-ProphetNet model."""
+
+
+import copy
+import math
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, nn
+from torch.nn import LayerNorm
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_xlm_prophetnet import XLMProphetNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+_CONFIG_FOR_DOC = "XLMProphetNetConfig"
+
+
+from ..deprecated._archive_maps import XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_START_DOCSTRING with ProphetNetConfig->XLMProphetNetConfig
+XLM_PROPHETNET_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ Original ProphetNet code can be found [here](https://github.com/microsoft/ProphetNet). Checkpoints were converted
+ from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the
+ file `convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py`.
+
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`XLMProphetNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+# Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_INPUTS_DOCSTRING with ProphetNet->XLMProphetNet
+XLM_PROPHETNET_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ XLMProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_STANDALONE_INPUTS_DOCSTRING with ProphetNet->XLMProphetNet
+XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.softmax
+def softmax(hidden_state, dim, onnx_trace=False):
+ if onnx_trace:
+ return nn.functional.softmax(hidden_state.float(), dim=dim)
+ else:
+ return nn.functional.softmax(hidden_state, dim=dim, dtype=torch.float32)
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ngram_attention_bias
+def ngram_attention_bias(sequence_length, ngram, device, dtype):
+ """
+ This function computes the bias for the predict stream
+ """
+ left_block = (
+ torch.ones((ngram, sequence_length, sequence_length), device=device, dtype=dtype) * torch.finfo(dtype).min
+ )
+ right_block = left_block.detach().clone()
+ # create bias
+ for stream_idx in range(ngram):
+ right_block[stream_idx].fill_diagonal_(0, wrap=False)
+ left_block[stream_idx].triu_(-stream_idx + 1)
+
+ left_block[:, :, 0] = 0
+ return torch.cat([left_block, right_block], dim=2)
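+
+# Editorial note (not part of the original source): the helper above returns a bias of
+# shape `(ngram, sequence_length, 2 * sequence_length)`. The left half applies a
+# causal-style mask over the main-stream positions (shifted per predict stream, with the
+# first position always visible), the right half only lets each predict-stream token
+# attend to its own position, and masked entries are set to the minimum value of `dtype`.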
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.compute_relative_buckets
+def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False):
+ """
+ This function computes individual parts of the relative position buckets. For more detail, see paper.
+ """
+ inv_relative_positions = -relative_positions
+ rel_positions_bucket = 0
+
+ if is_bidirectional:
+ num_buckets = num_buckets // 2
+ rel_positions_bucket = (
+ rel_positions_bucket
+ + torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets
+ )
+ inv_relative_positions = torch.abs(inv_relative_positions)
+ else:
+ inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions))
+
+ max_exact = num_buckets // 2
+ is_small = torch.lt(inv_relative_positions, max_exact)
+ val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log(
+ max_distance / max_exact
+ ) * (num_buckets - max_exact)
+ val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int()
+ rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large)
+ return rel_positions_bucket
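+
+# Editorial note (not part of the original source): with the defaults used in this file
+# (`num_buckets=32`, `max_distance=128`), in the unidirectional case relative distances
+# 0-15 each get their own bucket and larger distances are mapped logarithmically into the
+# remaining buckets, capped at bucket 31; in the bidirectional case the buckets are split
+# evenly between negative and positive relative positions.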
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.compute_all_stream_relative_buckets
+def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids):
+ """
+ This function computes both main and predict relative position buckets. For more detail, see paper.
+ """
+ # main stream
+ main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1)
+ main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1)
+
+ # predicting stream
+ predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1)
+ predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1)
+ predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1)
+
+ # get both position buckets
+ main_relative_position_buckets = compute_relative_buckets(
+ num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False
+ )
+ predict_relative_position_buckets = compute_relative_buckets(
+ num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False
+ )
+ return main_relative_position_buckets, predict_relative_position_buckets
+
+
+@dataclass
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput with ProphetNet->XLMProphetNet all-casing
+class XLMProphetNetSeq2SeqLMOutput(ModelOutput):
+ """
+ Base class for sequence-to-sequence language models outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the main stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`).
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, encoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, encoder_sequence_length)`. Attention weights of the encoder, after the attention
+ softmax, used to compute the weighted average in the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ logits_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+ @property
+ def decoder_cross_attentions(self):
+ warnings.warn(
+ "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
+ " instead.",
+ FutureWarning,
+ )
+ return self.cross_attentions
+
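+# Usage note for the deprecated property above (names are placeholders, not defined in this module): constructing
+#   out = XLMProphetNetSeq2SeqLMOutput(logits=my_logits, cross_attentions=my_cross_attn)
+# and reading `out.decoder_cross_attentions` returns the same object as `out.cross_attentions` while emitting a
+# FutureWarning; new code should read `cross_attentions` directly.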
+
+@dataclass
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput with ProphetNet->XLMProphetNet all-casing
+class XLMProphetNetSeq2SeqModelOutput(ModelOutput):
+ """
+ Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential
+ decoding.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
+ Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, encoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, encoder_sequence_length)`.
+
+ Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ last_hidden_state_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+ @property
+ def decoder_cross_attentions(self):
+ warnings.warn(
+ "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`"
+ " instead.",
+ FutureWarning,
+ )
+ return self.cross_attentions
+
+
+@dataclass
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput with ProphetNet->XLMProphetNet all-casing
+class XLMProphetNetDecoderModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):
+ Sequence of main stream hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ last_hidden_state_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput with ProphetNet->XLMProphetNet all-casing
+class XLMProphetNetDecoderLMOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the main stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):
+ Prediction scores of the predict stream language modeling head (scores for each vocabulary token before
+ SoftMax).
+ past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
+ num_attn_heads, decoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be
+ used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs.
+ hidden_states_ngram (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.
+
+ Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding
+ outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ decoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the
+ weighted average in the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,
+ encoder_sequence_length, decoder_sequence_length)`.
+
+ Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to
+ compute the weighted average in the cross-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ logits_ngram: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetPreTrainedModel with ProphetNet->XLMProphetNet
+class XLMProphetNetPreTrainedModel(PreTrainedModel):
+ config_class = XLMProphetNetConfig
+ base_model_prefix = "prophetnet"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.init_std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.init_std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.config.decoder_start_token_id
+ pad_token_id = self.config.pad_token_id
+
+ assert decoder_start_token_id is not None, (
+ "self.model.config.decoder_start_token_id has to be defined. In XLMProphetNet it is usually set to the"
+ " pad_token_id. See XLMProphetNet docs for more information"
+ )
+
+ # shift inputs to the right
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only non-negative values"
+
+ return shifted_input_ids
+
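+# A small worked example of `_shift_right` (token ids are hypothetical): with decoder_start_token_id=2 and
+# pad_token_id=0, labels [[5, 6, -100, 7]] become [[2, 5, 6, -100]] after the shift, and the masked_fill_ then
+# turns the ignored label into [[2, 5, 6, 0]].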
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetPositionalEmbeddings with ProphetNet->XLMProphetNet
+class XLMProphetNetPositionalEmbeddings(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting
+ based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to
+ the forward function.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig) -> None:
+ self.max_length = config.max_position_embeddings
+ super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
+
+ def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None):
+ assert (position_ids is None) or (
+ self.padding_idx is None
+ ), "If position_ids is pre-computed then padding_idx should not be set."
+
+ if position_ids is None:
+ if past_key_values is not None:
+ # position_ids is the same for every token when decoding a single step
+ # Without the int() cast, it doesn't work in some cases when exporting to ONNX
+ prev_num_input_ids = past_key_values[0][0].shape[2]
+ num_input_ids = inputs_shape[1] + prev_num_input_ids
+ position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * (
+ int(self.padding_idx + num_input_ids)
+ )
+ else:
+ if attention_mask is None:
+ attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device)
+
+ # retrieve position_ids from input_ids / attention_mask
+ position_ids = (
+ torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
+ ).long() + self.padding_idx
+
+ # make sure position_ids are not bigger than max_length
+ position_ids = position_ids.clamp(0, self.max_length - 1)
+
+ return super().forward(position_ids), position_ids
+
+ def _forward(self, position_ids):
+ return super().forward(position_ids)
+
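+# Illustration of the position-id computation above (values assumed): with padding_idx=1 and an attention_mask row
+# [1, 1, 1, 0, 0], cumsum * mask gives [1, 2, 3, 0, 0] and adding padding_idx yields position_ids [2, 3, 4, 1, 1],
+# i.e. real tokens count up from padding_idx + 1 while padded slots stay at padding_idx. During cached decoding the
+# single new token instead gets padding_idx + (past length + 1) directly, clamped to max_length - 1.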
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetAttention with ProphetNet->XLMProphetNet
+class XLMProphetNetAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ config: XLMProphetNetConfig,
+ num_attn_heads: int,
+ ):
+ super().__init__()
+ hidden_size = config.hidden_size
+
+ self.attention_dropout = config.attention_dropout
+ self.dropout = config.dropout
+ self.num_attn_heads = num_attn_heads
+ self.head_dim = hidden_size // num_attn_heads
+
+ assert self.head_dim * num_attn_heads == hidden_size, (
+ "`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and"
+ " `config.num_decoder_attention_heads`"
+ )
+
+ self.key_proj = nn.Linear(hidden_size, hidden_size)
+ self.value_proj = nn.Linear(hidden_size, hidden_size)
+ self.query_proj = nn.Linear(hidden_size, hidden_size)
+
+ self.out_proj = nn.Linear(hidden_size, hidden_size)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states,
+ key_value_states: Optional[Tensor] = None,
+ attention_mask: Optional[Tensor] = None,
+ layer_head_mask: Optional[Tensor] = None,
+ past_key_value: Optional[Tuple[Tensor]] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[Tensor, Optional[Tensor]]:
+ batch_size, tgt_len, hidden_size = hidden_states.size()
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ assert list(hidden_states.size()) == [
+ batch_size,
+ tgt_len,
+ hidden_size,
+ ], f"Size of hidden states should be {batch_size, tgt_len, hidden_size}, but is {hidden_states.size()}"
+
+ # previous time steps are cached - no need to recompute key and value if they are static
+ query_states = self.query_proj(hidden_states) / (self.head_dim**0.5)
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.key_proj(key_value_states), -1, batch_size)
+ value_states = self._shape(self.value_proj(key_value_states), -1, batch_size)
+ else:
+ # self_attention
+ key_states = self._shape(self.key_proj(hidden_states), -1, batch_size)
+ value_states = self._shape(self.value_proj(hidden_states), -1, batch_size)
+
+ if is_cross_attention:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ # project states into the correct shape
+ proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, batch_size).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+ src_len = key_states.size(2)
+ attn_weights = torch.einsum("bsij,bsjk->bsik", query_states, key_states.transpose(2, 3))
+ expected_shape = (batch_size, self.num_attn_heads, tgt_len, src_len)
+ if attn_weights.size() != expected_shape:
+ raise ValueError(f"Attention weights should have size {expected_shape}, but is {attn_weights.size()}")
+
+ # This is part of a workaround to get around fork/join parallelism not supporting Optional types.
+ if attention_mask is not None and attention_mask.dim() == 0:
+ attention_mask = None
+
+ expected_shape = (batch_size, self.num_attn_heads, 1, src_len)
+ if attention_mask is not None and attention_mask.size() != expected_shape:
+ raise ValueError(f"Attention mask should have size {expected_shape}, but is {attention_mask.size()}")
+ if attention_mask is not None: # don't attend to padding symbols
+ attn_weights = attn_weights + attention_mask
+ if output_attentions:
+ attn_weights_reshaped = attn_weights
+ else:
+ attn_weights_reshaped = None
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
+ batch_size, self.num_attn_heads, tgt_len, src_len
+ )
+
+ # apply head_mask also on attn_weights_reshaped which is used for n-gram attention inside the model
+ attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped
+
+ attn_probs = nn.functional.dropout(
+ attn_weights,
+ p=self.attention_dropout,
+ training=self.training,
+ )
+ attn_output = torch.einsum("bsij,bsjk->bsik", attn_probs, value_states)
+ expected_shape = (batch_size, self.num_attn_heads, tgt_len, self.head_dim)
+ if attn_output.size() != expected_shape:
+ raise ValueError(f"`attn_output` should have shape {expected_shape}, but is of shape {attn_output.size()}")
+
+ attn_output = attn_output.transpose(1, 2).reshape(batch_size, tgt_len, hidden_size)
+ attn_output = self.out_proj(attn_output)
+
+ attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
+ return attn_output, attn_weights_reshaped, past_key_value
+
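+# Shape sketch for the attention module above (config values are assumed, purely illustrative): with hidden_size=16
+# and num_attn_heads=4 (head_dim=4), a (2, 5, 16) `hidden_states` input yields an attn_output of shape (2, 5, 16);
+# the returned attention weights are (2, 4, 5, 5) when output_attentions=True and None otherwise, and the returned
+# past_key_value is only filled in with projected key/value states in the cross-attention case.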
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetFeedForward with ProphetNet->XLMProphetNet
+class XLMProphetNetFeedForward(nn.Module):
+ """
+ This is the residual two feed-forward layer block based on the original Transformer implementation.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig, ffn_dim: int):
+ super().__init__()
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.intermediate = nn.Linear(config.hidden_size, ffn_dim)
+ self.output = nn.Linear(ffn_dim, config.hidden_size)
+ self.activation_dropout = config.activation_dropout
+ self.dropout = config.dropout
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.output(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ return hidden_states
+
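+# The feed-forward block above keeps the token dimension unchanged: a (batch, seq_len, hidden_size) input is
+# expanded to `ffn_dim`, activated, and projected back, so e.g. a (2, 5, 16) input with an assumed ffn_dim=64
+# produces intermediate activations of shape (2, 5, 64) and an output of shape (2, 5, 16) again.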
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetNgramSelfAttention with ProphetNet->XLMProphetNet
+class XLMProphetNetNgramSelfAttention(nn.Module):
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.num_buckets = config.num_buckets
+ self.relative_max_distance = config.relative_max_distance
+ self.num_attn_heads = config.num_decoder_attention_heads
+ self.dropout = config.dropout
+ self.attention_dropout = config.attention_dropout
+ self.head_dim = config.hidden_size // self.num_attn_heads
+ self.ngram = config.ngram
+
+ assert (
+ self.head_dim * self.num_attn_heads == config.hidden_size
+ ), "config.hidden_size must be divisible by num_attn_heads"
+ # key, value, query projection
+ self.key_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.value_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.query_proj = nn.Linear(config.hidden_size, config.hidden_size)
+
+ # out projection
+ self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
+
+ # rel position embeddings
+ self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads)
+
+ # for onnx runtime
+ self.onnx_trace = False
+
+ def _shape(self, tensor, seq_len, batch_size):
+ return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def prepare_for_onnx_export_(self):
+ self.onnx_trace = True
+
+ def forward(
+ self,
+ hidden_states,
+ past_key_value: Optional[Tuple[Tensor]] = None,
+ attention_mask=None,
+ layer_head_mask=None,
+ extended_predict_attention_mask=None,
+ main_relative_position_buckets=None,
+ predict_relative_position_buckets=None,
+ position_ids=None,
+ ):
+ batch_size, ngram_sequence_length, hidden_size = hidden_states.size()
+ assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], (
+ f"`hidden_states` should be of shape {batch_size, ngram_sequence_length, hidden_size}, but is of shape"
+ f" {hidden_states.shape}"
+ )
+
+ # project
+ query_states = self.query_proj(hidden_states)
+ key_states = self.key_proj(hidden_states)
+ value_states = self.value_proj(hidden_states)
+
+ # normalize
+ query_states = query_states / (self.head_dim**0.5)
+
+ # reshape
+ query_states = self._shape(query_states, ngram_sequence_length, batch_size)
+ key_states = self._shape(key_states, -1, batch_size)
+ value_states = self._shape(value_states, -1, batch_size)
+ proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim)
+
+ query_states = query_states.view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ # chunk into main stream and predict stream
+ hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1)
+ query_states_list = query_states.chunk(1 + self.ngram, dim=2)
+ key_states_list = key_states.chunk(1 + self.ngram, dim=2)
+ value_states_list = value_states.chunk(1 + self.ngram, dim=2)
+
+ main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:]
+ main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:]
+ main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:]
+ main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:]
+
+ # saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim)
+ if past_key_value is not None:
+ prev_main_key_states = past_key_value[0]
+ main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=2)
+ prev_main_value_states = past_key_value[1]
+ main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=2)
+
+ # Update cache
+ past_key_value = (main_key_states, main_value_states)
+
+ # get seq_length of main stream only
+ sequence_length = ngram_sequence_length // (1 + self.ngram)
+
+ # MAIN-STREAM
+ # main attn weights
+ # [batch_size, number_heads, sequence_length, head_dimension]
+ # x [batch_size, number_heads, head_dimension, sequence_length]
+ # -> [batch_size, number_heads, sequence_length, sequence_length]
+ main_attn_weights = torch.einsum("bntc,bncs->bnts", main_query_states, main_key_states.transpose(2, 3))
+
+ # retrieve relative position embeddings for each layer -> see paper for more details
+ main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(
+ main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets
+ )
+
+ main_attn_weights = main_attn_weights + main_relative_pos_embeddings
+
+ if attention_mask is not None:
+ main_attn_weights = main_attn_weights + attention_mask
+
+ main_attn_probs = softmax(
+ main_attn_weights,
+ dim=-1,
+ onnx_trace=self.onnx_trace,
+ ).type_as(main_attn_weights)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view(
+ batch_size, self.num_attn_heads, -1, sequence_length
+ )
+
+ main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training)
+ # project to attn_output
+ # [batch_size, number_heads, sequence_length, sequence_length]
+ # x [batch_size, number_heads, sequence_length, head_dimension]
+ # -> [batch_size, number_heads, sequence_length, head_dimension]
+ main_attn_output = torch.einsum("bntc,bncs->bnts", main_attn_probs, main_value_states)
+ # reshape so that num_heads dim is merged into last `head_dim` axis
+ main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size)
+ main_attn_output = self.out_proj(main_attn_output)
+
+ # PREDICT-STREAM
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ predict_query_states = torch.stack(predict_query_states_list, 1).view(
+ batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim
+ )
+
+ # [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for key in predict_key_states_list], 1)
+
+ # [batch_size, sequence_length, ngram, hidden_size]
+ predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2)
+
+ # [batch_size, number_heads, ngram, 2*sequence_length, head_dimension]
+ predict_value_states = torch.cat(
+ [torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2
+ )
+
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ # -> [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ predict_attn_weights = torch.einsum("bnhtc,bnhsc->bnhts", (predict_query_states, predict_key_states))
+
+ # retrieve relative position embeddings for each layer -> see paper for more details
+ # [batch_size, ngram, number_heads, sequence_length, predict_relative_pos_embeddings]
+ predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(
+ predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets
+ )
+
+ # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings
+
+ if extended_predict_attention_mask is not None:
+ # Permuting Predict attention mask to [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4)
+ extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype)
+ predict_attn_weights = predict_attn_weights + extended_predict_attention_mask
+
+ predict_attn_probs = softmax(
+ predict_attn_weights,
+ dim=-1,
+ onnx_trace=self.onnx_trace,
+ ).type_as(predict_attn_weights)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (self.num_attn_heads,), (
+ f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs
+
+ predict_attn_probs = nn.functional.dropout(
+ predict_attn_probs, p=self.attention_dropout, training=self.training
+ )
+ # project to attention output
+ # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length]
+ # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimension]
+ # -> [batch_size, ngram, number_heads, sequence_length, head_dimension]
+ predict_attn_output = torch.einsum(
+ "bnhts,bnhsc->bnhtc", (predict_attn_probs, predict_value_states.transpose(1, 2))
+ )
+
+ # reshape so that num_heads dim is merged into last `head_dim` axis
+ # [batch_size, ngram, number_heads, sequence_length, head_dimension] -> [batch_size, ngram, sequence_length, hidden_size]
+ predict_attn_output = predict_attn_output.transpose(2, 3)
+ predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size)
+ predict_attn_output = self.out_proj(predict_attn_output)
+
+ # concat to single attn output
+ # [batch_size, (1+ngram)*sequence_length, hidden_size]
+ attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size)
+ # reshape into better form for `config.output_attentions`
+ main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1)
+
+ attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training)
+
+ return attn_output, main_attn_probs, predict_attn_probs, past_key_value
+
+ def get_main_relative_pos_embeddings(
+ self, hidden_states, attn_weights, position_ids, main_relative_position_buckets
+ ):
+ # input hidden_states [batch_size, sequence_length, hidden_size]
+ # input attn_weights [batch_size, num_heads, sequence_length, sequence_length]
+ # input position_ids [batch_size, sequence_length] or [1,1]
+ batch_size, num_attn_heads, tgt_len, src_len = attn_weights.shape
+ attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len)
+ if main_relative_position_buckets is None:
+ batch_size, sequence_length = hidden_states.shape[:2]
+ relative_positions = (
+ torch.arange(1, attn_weights.shape[-1] + 1)
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(batch_size, sequence_length, 1)
+ .to(position_ids.device)
+ )
+ # [batch_size, sequence_length, sequence_length+1]
+ relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
+ main_relative_position_buckets = compute_relative_buckets(
+ self.num_buckets, self.relative_max_distance, relative_positions, False
+ )
+
+ # [batch_size, sequence_length, num_buckets * num_heads]
+ rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
+ rel_pos_embeddings = rel_pos_embeddings.view(
+ rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)
+ )
+ rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2)
+ # [batch_size, num_heads, sequence_length, num_buckets]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,))
+
+ main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1)
+ # [batch_size * num_heads * sequence_length, sequence_length]
+ main_relative_position_buckets = main_relative_position_buckets.view(
+ -1, main_relative_position_buckets.shape[-1]
+ )
+ main_relative_position_buckets = main_relative_position_buckets.long()
+ # [batch_size * num_heads * sequence_length, num_buckets]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1))
+
+ main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets)
+ main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1)
+ return main_relative_pos_embeddings
+
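+ # Reading guide for the gather above (shapes restated from the comments, nothing new computed):
+ # `rel_pos_embeddings` holds a learned score per (token, bucket, head); flattening it to
+ # (batch * num_heads * sequence_length, num_buckets) and gathering with the per-(query, key) bucket indices
+ # picks, for every query/key pair, the score of that pair's distance bucket, which is then reshaped to
+ # (batch, num_heads, tgt_len, src_len) and added to the main-stream attention logits.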
+ def get_predict_relative_pos_embeddings(
+ self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets
+ ):
+ # input hidden_states [batch_size, sequence_length, ngram, hidden_size]
+ # input attn_weights [batch_size, ngram, num_heads, sequence_length, 2*sequence_length]
+ # input position_ids [batch_size, sequence_length] or [1,1]
+ # input predict_relative_position_buckets [batch_size, sequence_length, 2*sequence_length] or None
+ batch_size, sequence_length = hidden_states.shape[0:2]
+
+ if predict_relative_position_buckets is None:
+ key_sequence_length = attn_weights.shape[-1]
+ assert (
+ position_ids[0][0] == key_sequence_length - 1
+ ), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)"
+ relative_positions = (
+ torch.arange(0, key_sequence_length)
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(batch_size, sequence_length, 1)
+ .to(position_ids.device)
+ )
+
+ relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1)
+ predict_relative_position_buckets = compute_relative_buckets(
+ self.num_buckets, self.relative_max_distance, relative_positions, False
+ )
+
+ # [batch_size, ngram, sequence_length, hidden_size]
+ hidden_states = hidden_states.transpose(1, 2)
+ rel_pos_embeddings = self.relative_pos_embeddings(hidden_states)
+
+ # [batch_size, ngram, sequence_length, num_buckets, num_heads]
+ rel_pos_embeddings = rel_pos_embeddings.view(
+ hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)
+ )
+ rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3)
+ # [batch_size * ngram * sequence_length * num_heads, num_buckets]
+ rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets)
+ # [ngram, batch_size, num_heads * sequence_length, -1]
+ predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0)
+ predict_relative_position_buckets = predict_relative_position_buckets.repeat(
+ self.ngram, 1, self.num_attn_heads, 1
+ )
+ # [ngram * batch_size * num_heads * sequence_length, -1]
+ predict_relative_position_buckets = predict_relative_position_buckets.view(
+ -1, predict_relative_position_buckets.size(-1)
+ ).long()
+
+ predict_relative_pos_embeddings = torch.gather(
+ rel_pos_embeddings, dim=1, index=predict_relative_position_buckets
+ )
+
+ # [batch_size, ngram, num_heads, sequence_length, -1]
+ predict_relative_pos_embeddings = predict_relative_pos_embeddings.view(
+ batch_size, self.ngram, self.num_attn_heads, sequence_length, -1
+ )
+
+ return predict_relative_pos_embeddings
+
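+# High-level sketch of the n-gram self-attention above: the incoming hidden states are the main stream followed by
+# `ngram` predict streams along the sequence axis, i.e. a (batch, (1 + ngram) * seq_len, hidden_size) tensor. The
+# main stream attends over its own (possibly cached) keys/values only, while each predict stream attends over the
+# main keys/values concatenated with its own, and the per-stream outputs are concatenated back into the same
+# (batch, (1 + ngram) * seq_len, hidden_size) layout before dropout.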
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetEncoderLayer with ProphetNet->XLMProphetNet, Prophetnet->XLMProphetnet
+class XLMProphetNetEncoderLayer(nn.Module):
+ """
+ Encoder block for XLMProphetnet
+ """
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__()
+ # 1st residual block
+ self.self_attn = XLMProphetNetAttention(config, config.num_encoder_attention_heads)
+ self.self_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 2nd residual block
+ self.feed_forward = XLMProphetNetFeedForward(config, config.encoder_ffn_dim)
+ self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ output_attentions: bool = False,
+ ):
+ # 1st residual block
+ attention_output, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.self_attn_layer_norm(attention_output + hidden_states)
+
+ # 2nd residual block
+ feed_forward_output = self.feed_forward(hidden_states)
+ hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
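+# Each encoder layer above is a post-LayerNorm Transformer block: the self-attention output is added to its input
+# and normalized, then the feed-forward output is added and normalized again, so hidden states keep the
+# (batch, seq_len, hidden_size) shape throughout.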
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLayer with Prophetnet->XLMProphetnet, ProphetNet->XLMProphetNet
+class XLMProphetNetDecoderLayer(nn.Module):
+ """
+ Decoder block for XLMProphetnet
+ """
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__()
+ # 1st residual block
+ self.self_attn = XLMProphetNetNgramSelfAttention(config)
+ self.self_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 2nd residual block
+ if config.add_cross_attention:
+ self.cross_attn = XLMProphetNetAttention(config, config.num_decoder_attention_heads)
+ self.cross_attn_layer_norm = LayerNorm(config.hidden_size)
+
+ # 3rd residual block
+ self.feed_forward = XLMProphetNetFeedForward(config, config.decoder_ffn_dim)
+ self.feed_forward_layer_norm = LayerNorm(config.hidden_size)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attn_mask=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ extended_predict_attention_mask=None,
+ main_relative_position_buckets=None,
+ predict_relative_position_buckets=None,
+ position_ids=None,
+ past_key_value=None,
+ use_cache: bool = True,
+ output_attentions: bool = False,
+ ):
+ # 1st residual block
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ ngram_attention_output, self_attn_weights, self_attn_weights_ngram, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ extended_predict_attention_mask=extended_predict_attention_mask,
+ main_relative_position_buckets=main_relative_position_buckets,
+ predict_relative_position_buckets=predict_relative_position_buckets,
+ position_ids=position_ids,
+ )
+ hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ # 2nd residual block
+ attention_output, cross_attn_weights, cross_attn_present_key_value = self.cross_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attn_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # 3rd residual block
+ feed_forward_output = self.feed_forward(hidden_states)
+ hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
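+# Each decoder layer above chains three residual blocks: n-gram self-attention, optional cross-attention over the
+# encoder states (only built when `config.add_cross_attention` is set and only run when encoder hidden states are
+# passed), and the feed-forward block, each followed by LayerNorm. When caching is used, the first two entries of
+# `present_key_value` hold the self-attention key/value states and the last two hold the cross-attention ones.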
+
+@add_start_docstrings(
+ "The standalone encoder part of the XLMProphetNetModel.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetEncoder with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET
+class XLMProphetNetEncoder(XLMProphetNetPreTrainedModel):
+ r"""
+ word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
+ The word embedding parameters. This can be used to initialize [`XLMProphetNetEncoder`] with pre-defined word
+ embeddings instead of randomly initialized word embeddings.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig, word_embeddings: nn.Embedding = None):
+ super().__init__(config)
+
+ self.word_embeddings = (
+ word_embeddings
+ if word_embeddings is not None
+ else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ )
+ self.position_embeddings = XLMProphetNetPositionalEmbeddings(config)
+ self.embeddings_layer_norm = LayerNorm(config.hidden_size)
+
+ self.layers = nn.ModuleList([XLMProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetEncoder
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetEncoder.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Either input_ids or inputs_embeds has to be passed.")
+ elif input_ids is not None and inputs_embeds is not None:
+ raise ValueError("Make sure to only pass input_ids or inputs_embeds.")
+ elif input_ids is not None and inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ # prepare attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (
+ 1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1)
+ ) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype)
+ else:
+ extended_attention_mask = None
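+ # Mask sketch (illustrative values): an attention_mask row [1, 1, 0] becomes an additive mask of
+ # [0.0, 0.0, torch.finfo(dtype).min] broadcast over all encoder attention heads, so padded positions
+ # contribute effectively nothing after the softmax.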
+
+ position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device)
+
+ hidden_states = inputs_embeds + position_embeddings
+ hidden_states = self.embeddings_layer_norm(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training)
+
+ encoder_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ assert head_mask.size()[0] == (
+ len(self.layers)
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_hidden_states = encoder_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ extended_attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_hidden_states = encoder_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions
+ )
+
+
+@add_start_docstrings(
+ "The standalone decoder part of the XLMProphetNetModel.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoder with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET,
+class XLMProphetNetDecoder(XLMProphetNetPreTrainedModel):
+ r"""
+ word_embeddings (`torch.nn.Embedding` of shape `(config.vocab_size, config.hidden_size)`, *optional*):
+ The word embedding parameters. This can be used to initialize [`XLMProphetNetDecoder`] with pre-defined word
+ embeddings instead of randomly initialized word embeddings.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig, word_embeddings: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.ngram = config.ngram
+ self.num_buckets = config.num_buckets
+ self.relative_max_distance = config.relative_max_distance
+ self.dropout = config.dropout
+ self.max_target_positions = config.max_position_embeddings
+
+ self.word_embeddings = (
+ word_embeddings
+ if word_embeddings is not None
+ else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ )
+ self.position_embeddings = XLMProphetNetPositionalEmbeddings(config)
+
+ self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None)
+ self.layers = nn.ModuleList([XLMProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)])
+ self.embeddings_layer_norm = LayerNorm(config.hidden_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=XLMProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, XLMProphetNetDecoderModelOutput]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetDecoder
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetDecoder.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone", add_cross_attention=False)
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.")
+ elif input_ids is not None and inputs_embeds is not None:
+ raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.")
+ elif input_ids is not None and inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ main_stream_pos_embed, position_ids = self.position_embeddings(
+ (batch_size, sequence_length),
+ device=inputs_embeds.device,
+ past_key_values=past_key_values,
+ )
+
+ if past_key_values is not None:
+ main_relative_position_buckets, predict_relative_position_buckets = None, None
+ else:
+ (
+ main_relative_position_buckets,
+ predict_relative_position_buckets,
+ ) = self.compute_buffered_relative_buckets(position_ids)
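+ # the predicting streams use position embeddings shifted by one, i.e. the position of the next token they predict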
+ predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1)
+
+ # add position embeddings
+ hidden_states = inputs_embeds + main_stream_pos_embed
+
+ ngram_embeddings = self.ngram_embeddings.weight
+
+ # prepare attention mask
+ if past_key_values is not None:
+ assert (
+ hidden_states.size(1) == 1
+ ), "At the moment `use_cache` is only supported for `decoder_input_ids` of length 1"
+
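+ # with a cache only the last position is processed, so no causal or padding masks are needed on this branch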
+ ngram_hidden_states = [
+ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1)
+ for ngram in range(self.ngram)
+ ]
+ extended_attention_mask = None
+ extended_predict_attention_mask = None
+ else:
+ ngram_hidden_states = [
+ (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed) for ngram in range(self.ngram)
+ ]
+ extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask)
+ extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask)
+
+ # prepare encoder attention mask
+ if encoder_attention_mask is not None:
+ extended_encoder_attention_mask = (
+ 1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1)
+ ) * torch.finfo(self.dtype).min
+ extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype)
+ else:
+ extended_encoder_attention_mask = None
+
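+ # concatenate the main stream and the n-gram predict streams along the sequence dimension so that all
+ # decoder layers process them jointly; they are split again after the layer loop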
+ hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1)
+
+ if self.embeddings_layer_norm:
+ hidden_states = self.embeddings_layer_norm(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # init attentions, hidden_states and cache with empty tuples
+ all_main_stream_hidden_states = () if output_hidden_states else None
+ all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None
+
+ all_main_stream_attns = () if output_attentions else None
+ all_ngram_stream_attns = () if output_attentions else None
+ all_cross_attns = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ present_key_values = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ assert attn_mask.size()[0] == (len(self.layers)), (
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ # grad cannot be kept because tensor is sliced
+ all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
+ if self.config.ngram > 0:
+ all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ extended_attention_mask,
+ encoder_hidden_states,
+ extended_encoder_attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ (cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
+ extended_predict_attention_mask,
+ main_relative_position_buckets,
+ predict_relative_position_buckets,
+ position_ids,
+ None,
+ use_cache,
+ output_attentions,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=extended_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attn_mask=extended_encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ extended_predict_attention_mask=extended_predict_attention_mask,
+ main_relative_position_buckets=main_relative_position_buckets,
+ predict_relative_position_buckets=predict_relative_position_buckets,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ present_key_values += (layer_outputs[4 if output_attentions else 1],)
+
+ if output_attentions:
+ all_main_stream_attns += (layer_outputs[1],)
+ all_ngram_stream_attns += (layer_outputs[2],)
+
+ if self.config.add_cross_attention:
+ all_cross_attns += (layer_outputs[3],)
+
+ if output_hidden_states:
+ all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
+ if self.config.ngram > 0:
+ all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
+
+ # split last_hidden_state for return
+ last_hidden_state = hidden_states[:, :sequence_length]
+ last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ last_hidden_state,
+ last_hidden_state_ngram,
+ present_key_values,
+ all_main_stream_hidden_states,
+ all_ngram_stream_hidden_states,
+ all_main_stream_attns,
+ all_ngram_stream_attns,
+ all_cross_attns,
+ ]
+ if v is not None
+ )
+ return XLMProphetNetDecoderModelOutput(
+ last_hidden_state=last_hidden_state,
+ last_hidden_state_ngram=last_hidden_state_ngram,
+ past_key_values=present_key_values,
+ hidden_states=all_main_stream_hidden_states,
+ hidden_states_ngram=all_ngram_stream_hidden_states,
+ attentions=all_main_stream_attns,
+ ngram_attentions=all_ngram_stream_attns,
+ cross_attentions=all_cross_attns,
+ )
+
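+ # Relative position buckets are precomputed once over the full `max_target_positions` range and then sliced
+ # to the current sequence length, for both the main stream and the predict stream (whose key positions span
+ # [0, seq_len) and [max_target_positions, max_target_positions + seq_len)).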
+ def compute_buffered_relative_buckets(self, position_ids):
+ batch_size, sequence_length = position_ids.shape
+
+ position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1)
+ main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(
+ self.num_buckets, self.relative_max_distance, position_ids
+ )
+
+ # buffer relative buckets
+ main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1)
+ predict_relative_buckets = torch.cat(
+ [
+ predict_relative_buckets[:, :sequence_length, :sequence_length],
+ predict_relative_buckets[
+ :, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length
+ ],
+ ],
+ 2,
+ ).repeat(batch_size, 1, 1)
+
+ return main_relative_buckets, predict_relative_buckets
+
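+ # Builds the additive self-attention mask for the main stream: an upper-triangular causal mask filled with the
+ # dtype minimum, expanded to (batch, num_heads, seq_len, seq_len) and combined with the padding mask.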
+ def prepare_attention_mask(self, hidden_states, attention_mask):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # get causal mask
+ causal_mask = torch.full(
+ (seq_length, seq_length),
+ torch.finfo(hidden_states.dtype).min,
+ dtype=hidden_states.dtype,
+ device=hidden_states.device,
+ )
+ causal_mask = torch.triu(causal_mask, 1)
+
+ extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand(
+ (batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape
+ )
+
+ # add usual attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_causal_mask + extended_attention_mask
+ else:
+ extended_attention_mask = extended_causal_mask
+ return extended_attention_mask.to(hidden_states.dtype)
+
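+ # Builds the additive mask for the predict streams: the n-gram causal bias is sliced to (ngram, seq_len, 2 * seq_len)
+ # (main-stream keys followed by predict-stream keys), expanded over batch and heads, and combined with the
+ # padding mask, which is kept at zero for the predict-stream half of the keys.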
+ def prepare_predict_attention_mask(self, hidden_states, attention_mask):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # get causal mask
+ predict_causal_mask = ngram_attention_bias(
+ self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype
+ )
+ predict_causal_mask = torch.cat(
+ [
+ predict_causal_mask[:, :seq_length, :seq_length],
+ predict_causal_mask[
+ :, :seq_length, self.max_target_positions : self.max_target_positions + seq_length
+ ],
+ ],
+ dim=-1,
+ )
+ extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand(
+ (batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape
+ )
+
+ # add usual attention mask
+ if attention_mask is not None:
+ extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min
+ extended_attention_mask = extended_attention_mask.expand(
+ (batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length)
+ )
+ # predicted stream attention_mask should always be 0
+ extended_attention_mask = torch.cat(
+ [extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1
+ )
+ extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask
+ else:
+ extended_predict_attention_mask = extended_predict_causal_mask
+ return extended_predict_attention_mask.to(hidden_states.dtype)
+
+
+@add_start_docstrings(
+ "The bare XLMProphetNet Model outputting raw hidden-states without any specific head on top.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetModel with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET
+class XLMProphetNetModel(XLMProphetNetPreTrainedModel):
+ _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight"]
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__(config)
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+
+ encoder_config = copy.deepcopy(config)
+ encoder_config.is_encoder_decoder = False
+ encoder_config.use_cache = False
+ self.encoder = XLMProphetNetEncoder(encoder_config, self.word_embeddings)
+
+ decoder_config = copy.deepcopy(config)
+ decoder_config.is_decoder = True
+ decoder_config.is_encoder_decoder = False
+ self.decoder = XLMProphetNetDecoder(decoder_config, self.word_embeddings)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.word_embeddings = value
+ self.encoder.word_embeddings = self.word_embeddings
+ self.decoder.word_embeddings = self.word_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings)
+ self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=XLMProphetNetSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, XLMProphetNetSeq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetModel.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states
+ >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states
+ ```"""
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+ return XLMProphetNetSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram,
+ decoder_attentions=decoder_outputs.attentions,
+ decoder_ngram_attentions=decoder_outputs.ngram_attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The XLMProphetNet Model with a language modeling head. Can be used for sequence generation tasks.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetForConditionalGeneration with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET
+class XLMProphetNetForConditionalGeneration(XLMProphetNetPreTrainedModel):
+ _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight", "lm_head.weight"]
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__(config)
+ self.prophetnet = XLMProphetNetModel(config)
+ self.padding_idx = config.pad_token_id
+ self.disable_ngram_loss = config.disable_ngram_loss
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head)
+
+ def get_input_embeddings(self):
+ return self.prophetnet.word_embeddings
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=XLMProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, XLMProphetNetSeq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the sequence-to-sequence language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
+ labels in `[0, ..., config.vocab_size - 1]`
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetForConditionalGeneration
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetForConditionalGeneration.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+
+ >>> input_ids = tokenizer(
+ ... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+ ... ).input_ids # Batch size 1
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> logits_next_token = outputs.logits # logits to predict next token as usual
+ >>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
+ # get decoder inputs from shifting lm labels to the right
+ decoder_input_ids = self._shift_right(labels)
+
+ outputs = self.prophetnet(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ batch_size, sequence_length = (
+ decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2]
+ )
+
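+ # outputs[1] is `last_hidden_state_ngram` of shape (batch, ngram * seq_len, hidden); reshape it so that stream 0
+ # yields the standard next-token logits and streams 1..ngram-1 the additional future-token logits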
+ predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
+ predict_logits = self.lm_head(predicting_streams)
+
+ logits = predict_logits[:, 0]
+ logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
+
+ # To use .view in loss computation, make sure that logits is contiguous.
+ if not logits.is_contiguous():
+ logits = logits.contiguous()
+
+ loss = None
+ if labels is not None:
+ loss = self._compute_loss(predict_logits, labels)
+
+ if not return_dict:
+ all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
+ return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
+ else:
+ return XLMProphetNetSeq2SeqLMOutput(
+ loss=loss,
+ logits=logits,
+ logits_ngram=logits_ngram,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ decoder_ngram_attentions=outputs.decoder_ngram_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
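+ # Replicates `labels` across the n-gram prediction streams (only the first stream when `disable_ngram_loss` is
+ # set), computes an NLL loss over the flattened streams, and optionally applies label smoothing via `config.eps`.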
+ def _compute_loss(self, logits, labels, ignore_index=-100):
+ expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
+
+ for i in range(self.config.ngram):
+ if i > 0 and self.disable_ngram_loss:
+ break
+ expend_targets[i, :, :] = labels
+
+ logits = logits.transpose(0, 1).contiguous()
+ lprobs = nn.functional.log_softmax(
+ logits.view(-1, logits.size(-1)),
+ dim=-1,
+ dtype=torch.float32,
+ )
+
+ loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
+
+ if self.config.eps > 0.0:
+ smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
+ non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
+ smooth_loss = smooth_loss[non_masked_tokens]
+ smooth_loss = smooth_loss.mean()
+
+ eps_i = self.config.eps / lprobs.size(-1)
+ loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
+
+ return loss
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ assert encoder_outputs is not None, "`encoder_outputs` have to be passed for generation."
+
+ if past_key_values:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+ # first step, decoder_cached_states are empty
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache,
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return self._shift_right(labels)
+
+ @staticmethod
+ # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
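+ # reorder the cached self-attention key/value states (the first two entries per layer) along the batch
+ # dimension so that they follow the beams selected at this generation step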
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+ + layer_past[2:],
+ )
+ return reordered_past
+
+ def get_encoder(self):
+ return self.prophetnet.encoder
+
+ def get_decoder(self):
+ return self.prophetnet.decoder
+
+
+@add_start_docstrings(
+ "The standalone decoder part of the XLMProphetNetModel with a lm head on top. The model can be used for causal"
+ " language modeling.",
+ XLM_PROPHETNET_START_DOCSTRING,
+)
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetForCausalLM with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET
+class XLMProphetNetForCausalLM(XLMProphetNetPreTrainedModel):
+ _tied_weights_keys = [
+ "prophetnet.word_embeddings.weight",
+ "prophetnet.decoder.word_embeddings.weight",
+ "lm_head.weight",
+ ]
+
+ def __init__(self, config: XLMProphetNetConfig):
+ # set config for CLM
+ config = copy.deepcopy(config)
+ config.is_decoder = True
+ config.is_encoder_decoder = False
+ super().__init__(config)
+ self.prophetnet = XLMProphetNetDecoderWrapper(config)
+
+ self.padding_idx = config.pad_token_id
+ self.disable_ngram_loss = config.disable_ngram_loss
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.prophetnet.decoder.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.prophetnet.decoder.word_embeddings = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head)
+
+ def set_decoder(self, decoder):
+ self.prophetnet.decoder = decoder
+
+ def get_decoder(self):
+ return self.prophetnet.decoder
+
+ @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=XLMProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, XLMProphetNetDecoderLMOutput]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+ `[-100, 0, ..., config.vocab_size - 1]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size - 1]`
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMProphetNetForCausalLM
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = XLMProphetNetForCausalLM.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+
+ >>> # Model can also be used with EncoderDecoder framework
+ >>> from transformers import BertTokenizer, EncoderDecoderModel, AutoTokenizer
+ >>> import torch
+
+ >>> tokenizer_enc = BertTokenizer.from_pretrained("google-bert/bert-large-uncased")
+ >>> tokenizer_dec = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
+ ... "google-bert/bert-large-uncased", "patrickvonplaten/xprophetnet-large-uncased-standalone"
+ ... )
+
+ >>> ARTICLE = (
+ ... "the us state department said wednesday it had received no "
+ ... "formal word from bolivia that it was expelling the us ambassador there "
+ ... "but said the charges made against him are `` baseless ."
+ ... )
+ >>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids
+ >>> labels = tokenizer_dec(
+ ... "us rejects charges against its ambassador in bolivia", return_tensors="pt"
+ ... ).input_ids
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:])
+
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
+ outputs = self.prophetnet.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2]
+
+ predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
+ predict_logits = self.lm_head(predicting_streams)
+
+ logits = predict_logits[:, 0]
+ logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
+
+ loss = None
+ if labels is not None:
+ loss = self._compute_loss(predict_logits, labels)
+
+ if not return_dict:
+ all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
+ return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
+ else:
+ return XLMProphetNetDecoderLMOutput(
+ loss=loss,
+ logits=logits,
+ logits_ngram=logits_ngram,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ hidden_states_ngram=outputs.hidden_states_ngram,
+ attentions=outputs.attentions,
+ ngram_attentions=outputs.ngram_attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def _compute_loss(self, logits, labels, ignore_index=-100):
+ expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
+
+ for i in range(self.config.ngram):
+ if i > 0 and self.disable_ngram_loss:
+ break
+ expend_targets[i, :, :] = labels
+
+ logits = logits.transpose(0, 1).contiguous()
+ lprobs = nn.functional.log_softmax(
+ logits.view(-1, logits.size(-1)),
+ dim=-1,
+ dtype=torch.float32,
+ )
+
+ loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
+
+ if self.config.eps > 0.0:
+ smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
+ non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
+ smooth_loss = smooth_loss[non_masked_tokens]
+ smooth_loss = smooth_loss.mean()
+
+ eps_i = self.config.eps / lprobs.size(-1)
+ loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
+
+ return loss
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ use_cache=None,
+ **kwargs,
+ ):
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_ids.shape)
+
+ if past_key_values:
+ input_ids = input_ids[:, -1:]
+ # first step, decoder_cached_states are empty
+ return {
+ "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ # Copied from transformers.models.bart.modeling_bart.BartForCausalLM._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderWrapper with ProphetNet->XLMProphetNet, prophetnet->XLMProphetNet
+class XLMProphetNetDecoderWrapper(XLMProphetNetPreTrainedModel):
+ """
+ This is a wrapper class, so that [`XLMProphetNetForCausalLM`] can correctly be loaded from pretrained XLMProphetNet
+ classes.
+ """
+
+ def __init__(self, config: XLMProphetNetConfig):
+ super().__init__(config)
+
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.decoder = XLMProphetNetDecoder(config, word_embeddings=self.word_embeddings)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _tie_weights(self):
+ self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings())
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa65fa5cbfbaf2fccd6fcad94bf68a3ef9ed22d9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py
@@ -0,0 +1,323 @@
+# coding=utf-8
+# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SPIECE_UNDERLINE = "▁"
+
+VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
+
+
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+class XLMProphetNetTokenizer(PreTrainedTokenizer):
+ """
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ eos_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The end of sequence token.
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
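+
+ Example (a minimal usage sketch; the checkpoint name is taken from the XLMProphetNet model examples and is
+ assumed here purely for illustration):
+
+ ```python
+ >>> from transformers import XLMProphetNetTokenizer
+
+ >>> tokenizer = XLMProphetNetTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone")
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> input_ids = inputs["input_ids"]
+ ```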
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="[SEP]",
+ eos_token="[SEP]",
+ sep_token="[SEP]",
+ unk_token="[UNK]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ try:
+ import sentencepiece as spm
+ except ImportError:
+ logger.warning(
+ "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
+ " pip install sentencepiece"
+ )
+ raise
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(str(vocab_file))
+ self.vocab_file = vocab_file
+
+ # Original fairseq vocab and spm vocab must be "aligned":
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
+ # fairseq | '' | '' | '' | '' | ',' | '.' | '▁' | 's' | '▁de' | '-'
+ # spm | '' | '' | '' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
+
+ # put special tokens and [unused] tokens into the vocab
+ self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
+
+ for i in range(10):
+ tok = f"[unused{i}]"
+ self.fairseq_tokens_to_ids[tok] = 5 + i
+
+ # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
+ self.fairseq_offset = 12
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
+
+ # TODO ArthurZ fairseq_ids_to_tokens should be removed
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ try:
+ import sentencepiece as spm
+ except ImportError:
+ logger.warning(
+ "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
+ " pip install sentencepiece"
+ )
+ raise
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return ([0] * len(token_ids_0)) + [1]
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLMProphetNet
+ does not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0]
+ return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ @property
+ def vocab_size(self):
+ return len(self.sp_model) + self.fairseq_offset
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text: str) -> List[str]:
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if token in self.fairseq_tokens_to_ids:
+ return self.fairseq_tokens_to_ids[token]
+ spm_id = self.sp_model.PieceToId(token)
+
+ # Need to return unknown token if the SP model returned 0
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ if index in self.fairseq_ids_to_tokens:
+ return self.fairseq_ids_to_tokens[index]
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An XLMProphetNet sequence has the following format:
+
+ - single sequence: `X [SEP]`
+ - pair of sequences: `A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return token_ids_0 + [self.sep_token_id]
+ sep = [self.sep_token_id]
+ return token_ids_0 + sep + token_ids_1 + sep
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..28d59763bb85503b4ebc8c5aa8e8b299c45e586f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/__init__.py
@@ -0,0 +1,73 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
+ _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_yolos"] = [
+ "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "YolosForObjectDetection",
+ "YolosModel",
+ "YolosPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_yolos import YolosFeatureExtractor
+ from .image_processing_yolos import YolosImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_yolos import (
+ YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
+ YolosForObjectDetection,
+ YolosModel,
+ YolosPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/configuration_yolos.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/configuration_yolos.py
new file mode 100644
index 0000000000000000000000000000000000000000..098210f1a732e28260fe92e2512d59745ad52195
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/configuration_yolos.py
@@ -0,0 +1,178 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" YOLOS model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class YolosConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`YolosModel`]. It is used to instantiate a YOLOS
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the YOLOS
+ [hustvl/yolos-base](https://huggingface.co/hustvl/yolos-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ image_size (`List[int]`, *optional*, defaults to `[512, 864]`):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ num_detection_tokens (`int`, *optional*, defaults to 100):
+ The number of detection tokens.
+ use_mid_position_embeddings (`bool`, *optional*, defaults to `True`):
+ Whether to use the mid-layer position encodings.
+ auxiliary_loss (`bool`, *optional*, defaults to `False`):
+ Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
+ class_cost (`float`, *optional*, defaults to 1):
+ Relative weight of the classification error in the Hungarian matching cost.
+ bbox_cost (`float`, *optional*, defaults to 5):
+ Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
+ giou_cost (`float`, *optional*, defaults to 2):
+ Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
+ bbox_loss_coefficient (`float`, *optional*, defaults to 5):
+ Relative weight of the L1 bounding box loss in the object detection loss.
+ giou_loss_coefficient (`float`, *optional*, defaults to 2):
+ Relative weight of the generalized IoU loss in the object detection loss.
+ eos_coefficient (`float`, *optional*, defaults to 0.1):
+ Relative classification weight of the 'no-object' class in the object detection loss.
+
+ Example:
+
+ ```python
+ >>> from transformers import YolosConfig, YolosModel
+
+ >>> # Initializing a YOLOS hustvl/yolos-base style configuration
+ >>> configuration = YolosConfig()
+
+ >>> # Initializing a model (with random weights) from the hustvl/yolos-base style configuration
+ >>> model = YolosModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "yolos"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ image_size=[512, 864],
+ patch_size=16,
+ num_channels=3,
+ qkv_bias=True,
+ num_detection_tokens=100,
+ use_mid_position_embeddings=True,
+ auxiliary_loss=False,
+ class_cost=1,
+ bbox_cost=5,
+ giou_cost=2,
+ bbox_loss_coefficient=5,
+ giou_loss_coefficient=2,
+ eos_coefficient=0.1,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.qkv_bias = qkv_bias
+ self.num_detection_tokens = num_detection_tokens
+ self.use_mid_position_embeddings = use_mid_position_embeddings
+ self.auxiliary_loss = auxiliary_loss
+ # Hungarian matcher
+ self.class_cost = class_cost
+ self.bbox_cost = bbox_cost
+ self.giou_cost = giou_cost
+ # Loss coefficients
+ self.bbox_loss_coefficient = bbox_loss_coefficient
+ self.giou_loss_coefficient = giou_loss_coefficient
+ self.eos_coefficient = eos_coefficient
+
+
+class YolosOnnxConfig(OnnxConfig):
+ torch_onnx_minimum_version = version.parse("1.11")
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 12
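+
+
+# Illustrative sketch (not part of the original module): the ONNX export machinery reads the properties above to
+# build dummy inputs and choose an opset. Assuming the config is constructed directly from this module, usage
+# could look like:
+#
+#     from transformers import YolosConfig
+#     from transformers.models.yolos.configuration_yolos import YolosOnnxConfig
+#
+#     onnx_config = YolosOnnxConfig(YolosConfig())
+#     print(onnx_config.inputs)               # OrderedDict with dynamic axes for "pixel_values"
+#     print(onnx_config.default_onnx_opset)   # 12
+#     print(onnx_config.atol_for_validation)  # 1e-4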
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/convert_yolos_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/convert_yolos_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..35238151ab93efe4700cc13906f1587574864c07
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/convert_yolos_to_pytorch.py
@@ -0,0 +1,268 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert YOLOS checkpoints from the original repository. URL: https://github.com/hustvl/YOLOS"""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_yolos_config(yolos_name: str) -> YolosConfig:
+ config = YolosConfig()
+
+ # size of the architecture
+ if "yolos_ti" in yolos_name:
+ config.hidden_size = 192
+ config.intermediate_size = 768
+ config.num_hidden_layers = 12
+ config.num_attention_heads = 3
+ config.image_size = [800, 1333]
+ config.use_mid_position_embeddings = False
+ elif yolos_name == "yolos_s_dWr":
+ config.hidden_size = 330
+ config.num_hidden_layers = 14
+ config.num_attention_heads = 6
+ config.intermediate_size = 1320
+ elif "yolos_s" in yolos_name:
+ config.hidden_size = 384
+ config.intermediate_size = 1536
+ config.num_hidden_layers = 12
+ config.num_attention_heads = 6
+ elif "yolos_b" in yolos_name:
+ config.image_size = [800, 1344]
+
+ config.num_labels = 91
+ repo_id = "huggingface/label-files"
+ filename = "coco-detection-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ return config
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
+ for i in range(config.num_hidden_layers):
+ # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
+ in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
+ in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
+ state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
+ state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+ config.hidden_size : config.hidden_size * 2, :
+ ]
+ state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
+ config.hidden_size : config.hidden_size * 2
+ ]
+ state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
+ state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
+
+
+def rename_key(name: str) -> str:
+ if "backbone" in name:
+ name = name.replace("backbone", "vit")
+ if "cls_token" in name:
+ name = name.replace("cls_token", "embeddings.cls_token")
+ if "det_token" in name:
+ name = name.replace("det_token", "embeddings.detection_tokens")
+ if "mid_pos_embed" in name:
+ name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
+ if "pos_embed" in name:
+ name = name.replace("pos_embed", "embeddings.position_embeddings")
+ if "patch_embed.proj" in name:
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
+ if "blocks" in name:
+ name = name.replace("blocks", "encoder.layer")
+ if "attn.proj" in name:
+ name = name.replace("attn.proj", "attention.output.dense")
+ if "attn" in name:
+ name = name.replace("attn", "attention.self")
+ if "norm1" in name:
+ name = name.replace("norm1", "layernorm_before")
+ if "norm2" in name:
+ name = name.replace("norm2", "layernorm_after")
+ if "mlp.fc1" in name:
+ name = name.replace("mlp.fc1", "intermediate.dense")
+ if "mlp.fc2" in name:
+ name = name.replace("mlp.fc2", "output.dense")
+ if "class_embed" in name:
+ name = name.replace("class_embed", "class_labels_classifier")
+ if "bbox_embed" in name:
+ name = name.replace("bbox_embed", "bbox_predictor")
+ if "vit.norm" in name:
+ name = name.replace("vit.norm", "vit.layernorm")
+
+ return name
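+
+
+# A few example mappings produced by rename_key (derived from the rules above; the original keys are
+# hypothetical but follow the naming used in the official YOLOS checkpoints):
+#
+#     rename_key("backbone.cls_token")
+#     # -> "vit.embeddings.cls_token"
+#     rename_key("backbone.blocks.0.attn.proj.weight")
+#     # -> "vit.encoder.layer.0.attention.output.dense.weight"
+#     rename_key("backbone.blocks.0.mlp.fc1.bias")
+#     # -> "vit.encoder.layer.0.intermediate.dense.bias"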
+
+
+def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if "qkv" in key:
+ key_split = key.split(".")
+ layer_num = int(key_split[2])
+ dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
+ if "weight" in key:
+ orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
+ orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
+ dim : dim * 2, :
+ ]
+ orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
+ else:
+ orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
+ orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
+ orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
+ else:
+ orig_state_dict[rename_key(key)] = val
+
+ return orig_state_dict
+
+
+# We will verify our results on an image of cute cats
+def prepare_img() -> Image.Image:
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+@torch.no_grad()
+def convert_yolos_checkpoint(
+ yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
+):
+ """
+ Copy/paste/tweak model's weights to our YOLOS structure.
+ """
+ config = get_yolos_config(yolos_name)
+
+ # load original state_dict
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+
+ # load 🤗 model
+ model = YolosForObjectDetection(config)
+ model.eval()
+ new_state_dict = convert_state_dict(state_dict, model)
+ model.load_state_dict(new_state_dict)
+
+ # Check outputs on an image, prepared by YolosImageProcessor
+ size = 800 if yolos_name != "yolos_ti" else 512
+ image_processor = YolosImageProcessor(format="coco_detection", size=size)
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
+ outputs = model(**encoding)
+ logits, pred_boxes = outputs.logits, outputs.pred_boxes
+
+ expected_slice_logits, expected_slice_boxes = None, None
+ if yolos_name == "yolos_ti":
+ expected_slice_logits = torch.tensor(
+ [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
+ )
+ expected_slice_boxes = torch.tensor(
+ [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
+ )
+ elif yolos_name == "yolos_s_200_pre":
+ expected_slice_logits = torch.tensor(
+ [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
+ )
+ expected_slice_boxes = torch.tensor(
+ [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
+ )
+ elif yolos_name == "yolos_s_300_pre":
+ expected_slice_logits = torch.tensor(
+ [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
+ )
+ expected_slice_boxes = torch.tensor(
+ [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
+ )
+ elif yolos_name == "yolos_s_dWr":
+ expected_slice_logits = torch.tensor(
+ [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
+ )
+ expected_slice_boxes = torch.tensor(
+ [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
+ )
+ elif yolos_name == "yolos_base":
+ expected_slice_logits = torch.tensor(
+ [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
+ )
+ expected_slice_boxes = torch.tensor(
+ [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
+ )
+ else:
+ raise ValueError(f"Unknown yolos_name: {yolos_name}")
+
+ assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
+ assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ model_mapping = {
+ "yolos_ti": "yolos-tiny",
+ "yolos_s_200_pre": "yolos-small",
+ "yolos_s_300_pre": "yolos-small-300",
+ "yolos_s_dWr": "yolos-small-dwr",
+ "yolos_base": "yolos-base",
+ }
+
+ print("Pushing to the hub...")
+ model_name = model_mapping[yolos_name]
+ image_processor.push_to_hub(model_name, organization="hustvl")
+ model.push_to_hub(model_name, organization="hustvl")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--yolos_name",
+ default="yolos_s_200_pre",
+ type=str,
+ help=(
+ "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
+ " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
+ ),
+ )
+ parser.add_argument(
+ "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+ convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
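+
+# Typical invocation (paths are placeholders):
+#
+#     python convert_yolos_to_pytorch.py \
+#         --yolos_name yolos_s_200_pre \
+#         --checkpoint_path /path/to/yolos_s_200_pre.pth \
+#         --pytorch_dump_folder_path /path/to/output_dir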
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/feature_extraction_yolos.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/feature_extraction_yolos.py
new file mode 100644
index 0000000000000000000000000000000000000000..5696ee65bbdfa2b2712ebe9095b202c7856f5b19
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/feature_extraction_yolos.py
@@ -0,0 +1,43 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for YOLOS."""
+
+import warnings
+
+from ...image_transforms import rgb_to_id as _rgb_to_id
+from ...utils import logging
+from .image_processing_yolos import YolosImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+def rgb_to_id(x):
+ warnings.warn(
+ "rgb_to_id has moved and will not be importable from this module from v5. "
+ "Please import from transformers.image_transforms instead.",
+ FutureWarning,
+ )
+ return _rgb_to_id(x)
+
+
+class YolosFeatureExtractor(YolosImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+ " use YolosImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/image_processing_yolos.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/image_processing_yolos.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4e44854a0da4322044017f395e6ad400f92f6b7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/image_processing_yolos.py
@@ -0,0 +1,1447 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for YOLOS."""
+
+import pathlib
+from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
+
+import numpy as np
+
+from ...feature_extraction_utils import BatchFeature
+from ...image_processing_utils import BaseImageProcessor, get_size_dict
+from ...image_transforms import (
+ PaddingMode,
+ center_to_corners_format,
+ corners_to_center_format,
+ id_to_rgb,
+ pad,
+ rescale,
+ resize,
+ rgb_to_id,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_DEFAULT_MEAN,
+ IMAGENET_DEFAULT_STD,
+ AnnotationFormat,
+ AnnotationType,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_annotations,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import (
+ TensorType,
+ is_flax_available,
+ is_jax_tensor,
+ is_scipy_available,
+ is_tf_available,
+ is_tf_tensor,
+ is_torch_available,
+ is_torch_tensor,
+ is_vision_available,
+ logging,
+)
+
+
+if is_torch_available():
+ import torch
+ from torch import nn
+
+
+if is_vision_available():
+ import PIL
+
+
+if is_scipy_available():
+ import scipy.special
+ import scipy.stats
+
+logger = logging.get_logger(__name__)
+
+SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_max_height_width
+def get_max_height_width(
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
+) -> Tuple[int, int]:
+ """
+ Get the maximum height and width across all images in a batch.
+ """
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if input_data_format == ChannelDimension.FIRST:
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
+ elif input_data_format == ChannelDimension.LAST:
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
+ else:
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
+ return (max_height, max_width)
+
+
+def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
+ """
+ Computes the output image size given the input image size and the desired output size.
+
+ Args:
+ image_size (`Tuple[int, int]`):
+ The input image size.
+ size (`int`):
+ The desired output size.
+ max_size (`int`, *optional*):
+ The maximum allowed output size.
+ """
+ height, width = image_size
+ if max_size is not None:
+ min_original_size = float(min((height, width)))
+ max_original_size = float(max((height, width)))
+ if max_original_size / min_original_size * size > max_size:
+ size = int(round(max_size * min_original_size / max_original_size))
+
+ if width < height and width != size:
+ height = int(size * height / width)
+ width = size
+ elif height < width and height != size:
+ width = int(size * width / height)
+ height = size
+ width_mod = np.mod(width, 16)
+ height_mod = np.mod(height, 16)
+ width = width - width_mod
+ height = height - height_mod
+ return (height, width)
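+
+
+# Worked example for the function above (hypothetical input): for image_size=(480, 640) with size=800 and
+# max_size=1333, the rescaled longer edge 640 / 480 * 800 ≈ 1067 stays under max_size, so the shorter edge (480)
+# is mapped to 800 and the longer edge to int(800 * 640 / 480) = 1066; both are then rounded down to multiples
+# of 16.
+#
+#     get_size_with_aspect_ratio((480, 640), 800, max_size=1333)
+#     # -> (800, 1056)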
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
+def get_resize_output_image_size(
+ input_image: np.ndarray,
+ size: Union[int, Tuple[int, int], List[int]],
+ max_size: Optional[int] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+) -> Tuple[int, int]:
+ """
+ Computes the output image size given the input image size and the desired output size. If the desired output size
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
+ image size is computed by keeping the aspect ratio of the input image size.
+
+ Args:
+ input_image (`np.ndarray`):
+ The image to resize.
+ size (`int` or `Tuple[int, int]` or `List[int]`):
+ The desired output size.
+ max_size (`int`, *optional*):
+ The maximum allowed output size.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
+ """
+ image_size = get_image_size(input_image, input_data_format)
+ if isinstance(size, (list, tuple)):
+ return size
+
+ return get_size_with_aspect_ratio(image_size, size, max_size)
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
+def get_numpy_to_framework_fn(arr) -> Callable:
+ """
+ Returns a function that converts a numpy array to the framework of the input array.
+
+ Args:
+ arr (`np.ndarray`): The array to convert.
+ """
+ if isinstance(arr, np.ndarray):
+ return np.array
+ if is_tf_available() and is_tf_tensor(arr):
+ import tensorflow as tf
+
+ return tf.convert_to_tensor
+ if is_torch_available() and is_torch_tensor(arr):
+ import torch
+
+ return torch.tensor
+ if is_flax_available() and is_jax_tensor(arr):
+ import jax.numpy as jnp
+
+ return jnp.array
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
+
+
+# Copied from transformers.models.detr.image_processing_detr.safe_squeeze
+def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
+ """
+ Squeezes an array, but only if the axis specified has dim 1.
+ """
+ if axis is None:
+ return arr.squeeze()
+
+ try:
+ return arr.squeeze(axis=axis)
+ except ValueError:
+ return arr
+
+
+# Copied from transformers.models.detr.image_processing_detr.normalize_annotation
+def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
+ image_height, image_width = image_size
+ norm_annotation = {}
+ for key, value in annotation.items():
+ if key == "boxes":
+ boxes = value
+ boxes = corners_to_center_format(boxes)
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
+ norm_annotation[key] = boxes
+ else:
+ norm_annotation[key] = value
+ return norm_annotation
+
+
+# Copied from transformers.models.detr.image_processing_detr.max_across_indices
+def max_across_indices(values: Iterable[Any]) -> List[Any]:
+ """
+ Return the maximum value across all indices of an iterable of values.
+ """
+ return [max(values_i) for values_i in zip(*values)]
+
+
+# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
+def make_pixel_mask(
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
+) -> np.ndarray:
+ """
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
+
+ Args:
+ image (`np.ndarray`):
+ Image to make the pixel mask for.
+ output_size (`Tuple[int, int]`):
+ Output size of the mask.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ mask = np.zeros(output_size, dtype=np.int64)
+ mask[:input_height, :input_width] = 1
+ return mask
+
+
+# Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
+def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
+ """
+ Convert a COCO polygon annotation to a mask.
+
+ Args:
+ segmentations (`List[List[float]]`):
+ List of polygons, each polygon represented by a list of x-y coordinates.
+ height (`int`):
+ Height of the mask.
+ width (`int`):
+ Width of the mask.
+ """
+ try:
+ from pycocotools import mask as coco_mask
+ except ImportError:
+ raise ImportError("Pycocotools is not installed in your environment.")
+
+ masks = []
+ for polygons in segmentations:
+ rles = coco_mask.frPyObjects(polygons, height, width)
+ mask = coco_mask.decode(rles)
+ if len(mask.shape) < 3:
+ mask = mask[..., None]
+ mask = np.asarray(mask, dtype=np.uint8)
+ mask = np.any(mask, axis=2)
+ masks.append(mask)
+ if masks:
+ masks = np.stack(masks, axis=0)
+ else:
+ masks = np.zeros((0, height, width), dtype=np.uint8)
+
+ return masks
+
+
+# Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation
+def prepare_coco_detection_annotation(
+ image,
+ target,
+ return_segmentation_masks: bool = False,
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
+):
+ """
+ Convert the target in COCO format into the format expected by YOLOS.
+ """
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
+
+ image_id = target["image_id"]
+ image_id = np.asarray([image_id], dtype=np.int64)
+
+ # Get all COCO annotations for the given image.
+ annotations = target["annotations"]
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
+
+ classes = [obj["category_id"] for obj in annotations]
+ classes = np.asarray(classes, dtype=np.int64)
+
+ # for conversion to coco api
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
+
+ boxes = [obj["bbox"] for obj in annotations]
+ # guard against no boxes via resizing
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
+ boxes[:, 2:] += boxes[:, :2]
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
+
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
+
+ new_target = {}
+ new_target["image_id"] = image_id
+ new_target["class_labels"] = classes[keep]
+ new_target["boxes"] = boxes[keep]
+ new_target["area"] = area[keep]
+ new_target["iscrowd"] = iscrowd[keep]
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
+
+ if annotations and "keypoints" in annotations[0]:
+ keypoints = [obj["keypoints"] for obj in annotations]
+ # Converting the filtered keypoints list to a numpy array
+ keypoints = np.asarray(keypoints, dtype=np.float32)
+ # Apply the keep mask here to filter the relevant annotations
+ keypoints = keypoints[keep]
+ num_keypoints = keypoints.shape[0]
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
+ new_target["keypoints"] = keypoints
+
+ if return_segmentation_masks:
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
+ new_target["masks"] = masks[keep]
+
+ return new_target
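+
+
+# Illustrative input/output for the function above (values are hypothetical). A COCO-style target such as
+#
+#     {"image_id": 42, "annotations": [{"bbox": [10.0, 20.0, 30.0, 40.0], "category_id": 1, "area": 1200.0, "iscrowd": 0}]}
+#
+# is converted into numpy arrays, with each box turned from (x, y, width, height) into clipped corner
+# coordinates: new_target["boxes"] == [[10.0, 20.0, 40.0, 60.0]], new_target["class_labels"] == [1], and
+# new_target["orig_size"] holding the image (height, width).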
+
+
+# Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
+def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
+ """
+ Compute the bounding boxes around the provided panoptic segmentation masks.
+
+ Args:
+ masks: masks in format `[number_masks, height, width]`, where `number_masks` is the number of masks
+
+ Returns:
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
+ """
+ if masks.size == 0:
+ return np.zeros((0, 4))
+
+ h, w = masks.shape[-2:]
+ y = np.arange(0, h, dtype=np.float32)
+ x = np.arange(0, w, dtype=np.float32)
+ # see https://github.com/pytorch/pytorch/issues/50276
+ y, x = np.meshgrid(y, x, indexing="ij")
+
+ x_mask = masks * np.expand_dims(x, axis=0)
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
+ x_min = x.filled(fill_value=1e8)
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
+
+ y_mask = masks * np.expand_dims(y, axis=0)
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
+ y_min = y.filled(fill_value=1e8)
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
+
+ return np.stack([x_min, y_min, x_max, y_max], 1)
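+
+
+# Small worked example for masks_to_boxes (hypothetical mask): a single 8x8 mask whose True pixels cover rows
+# 2..4 and columns 3..6 yields the xyxy box [3., 2., 6., 4.] (max indices are inclusive, matching the meshgrid
+# coordinates used above).
+#
+#     import numpy as np
+#
+#     mask = np.zeros((1, 8, 8), dtype=bool)
+#     mask[0, 2:5, 3:7] = True
+#     masks_to_boxes(mask)
+#     # -> array([[3., 2., 6., 4.]])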
+
+
+# Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->YOLOS
+def prepare_coco_panoptic_annotation(
+ image: np.ndarray,
+ target: Dict,
+ masks_path: Union[str, pathlib.Path],
+ return_masks: bool = True,
+ input_data_format: Union[ChannelDimension, str] = None,
+) -> Dict:
+ """
+ Prepare a coco panoptic annotation for YOLOS.
+ """
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
+
+ new_target = {}
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
+
+ if "segments_info" in target:
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
+ masks = rgb_to_id(masks)
+
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
+ masks = masks == ids[:, None, None]
+ masks = masks.astype(np.uint8)
+ if return_masks:
+ new_target["masks"] = masks
+ new_target["boxes"] = masks_to_boxes(masks)
+ new_target["class_labels"] = np.array(
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
+ )
+ new_target["iscrowd"] = np.asarray(
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
+ )
+ new_target["area"] = np.asarray(
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
+ )
+
+ return new_target
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_segmentation_image
+def get_segmentation_image(
+ masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False
+):
+ h, w = input_size
+ final_h, final_w = target_size
+
+ m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
+
+ if m_id.shape[-1] == 0:
+ # We didn't detect any mask :(
+ m_id = np.zeros((h, w), dtype=np.int64)
+ else:
+ m_id = m_id.argmax(-1).reshape(h, w)
+
+ if deduplicate:
+ # Merge the masks corresponding to the same stuff class
+ for equiv in stuff_equiv_classes.values():
+ for eq_id in equiv:
+ m_id[m_id == eq_id] = equiv[0]
+
+ seg_img = id_to_rgb(m_id)
+ seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
+ return seg_img
+
+
+# Copied from transformers.models.detr.image_processing_detr.get_mask_area
+def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
+ final_h, final_w = target_size
+ np_seg_img = seg_img.astype(np.uint8)
+ np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
+ m_id = rgb_to_id(np_seg_img)
+ area = [(m_id == i).sum() for i in range(n_classes)]
+ return area
+
+
+# Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
+def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+ probs = scipy.special.softmax(logits, axis=-1)
+ labels = probs.argmax(-1, keepdims=True)
+ scores = np.take_along_axis(probs, labels, axis=-1)
+ scores, labels = scores.squeeze(-1), labels.squeeze(-1)
+ return scores, labels
+
+
+# Copied from transformers.models.detr.image_processing_detr.resize_annotation
+def resize_annotation(
+ annotation: Dict[str, Any],
+ orig_size: Tuple[int, int],
+ target_size: Tuple[int, int],
+ threshold: float = 0.5,
+ resample: PILImageResampling = PILImageResampling.NEAREST,
+):
+ """
+ Resizes an annotation to a target size.
+
+ Args:
+ annotation (`Dict[str, Any]`):
+ The annotation dictionary.
+ orig_size (`Tuple[int, int]`):
+ The original size of the input image.
+ target_size (`Tuple[int, int]`):
+ The target size of the image, as returned by the preprocessing `resize` step.
+ threshold (`float`, *optional*, defaults to 0.5):
+ The threshold used to binarize the segmentation masks.
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
+ The resampling filter to use when resizing the masks.
+ """
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
+ ratio_height, ratio_width = ratios
+
+ new_annotation = {}
+ new_annotation["size"] = target_size
+
+ for key, value in annotation.items():
+ if key == "boxes":
+ boxes = value
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
+ new_annotation["boxes"] = scaled_boxes
+ elif key == "area":
+ area = value
+ scaled_area = area * (ratio_width * ratio_height)
+ new_annotation["area"] = scaled_area
+ elif key == "masks":
+ masks = value[:, None]
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
+ masks = masks.astype(np.float32)
+ masks = masks[:, 0] > threshold
+ new_annotation["masks"] = masks
+ elif key == "size":
+ new_annotation["size"] = target_size
+ else:
+ new_annotation[key] = value
+
+ return new_annotation
+
+
+# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
+def binary_mask_to_rle(mask):
+ """
+ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.
+
+ Args:
+ mask (`torch.Tensor` or `numpy.array`):
+ A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
+ segment_id or class_id.
+ Returns:
+ `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
+ format.
+ """
+ if is_torch_tensor(mask):
+ mask = mask.numpy()
+
+ pixels = mask.flatten()
+ pixels = np.concatenate([[0], pixels, [0]])
+ runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
+ runs[1::2] -= runs[::2]
+ return list(runs)
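+
+
+# Worked example for binary_mask_to_rle (hypothetical mask): after flattening and padding with zeros, run starts
+# and lengths are returned as alternating (start, length) pairs, with starts counted from 1.
+#
+#     import numpy as np
+#
+#     binary_mask_to_rle(np.array([[0, 0, 1], [1, 1, 0]]))
+#     # -> [3, 3]  (one run of three foreground pixels starting at flattened position 3)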
+
+
+# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
+def convert_segmentation_to_rle(segmentation):
+ """
+ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.
+
+ Args:
+ segmentation (`torch.Tensor` or `numpy.array`):
+ A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
+ Returns:
+ `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
+ """
+ segment_ids = torch.unique(segmentation)
+
+ run_length_encodings = []
+ for idx in segment_ids:
+ mask = torch.where(segmentation == idx, 1, 0)
+ rle = binary_mask_to_rle(mask)
+ run_length_encodings.append(rle)
+
+ return run_length_encodings
+
+
+# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
+def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
+ """
+ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and
+ `labels`.
+
+ Args:
+ masks (`torch.Tensor`):
+ A tensor of shape `(num_queries, height, width)`.
+ scores (`torch.Tensor`):
+ A tensor of shape `(num_queries)`.
+ labels (`torch.Tensor`):
+ A tensor of shape `(num_queries)`.
+ object_mask_threshold (`float`):
+ A number between 0 and 1 used to binarize the masks.
+ Raises:
+ `ValueError`: Raised when the first dimension doesn't match in all input tensors.
+ Returns:
+ `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` with every entry
+ removed whose score is not above `object_mask_threshold` or whose label is the "no object" class.
+ """
+ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
+ raise ValueError("mask, scores and labels must have the same shape!")
+
+ to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
+
+ return masks[to_keep], scores[to_keep], labels[to_keep]
+
+
+# Copied from transformers.models.detr.image_processing_detr.check_segment_validity
+def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
+ # Get the mask associated with the k class
+ mask_k = mask_labels == k
+ mask_k_area = mask_k.sum()
+
+ # Compute the area of all the stuff in query k
+ original_area = (mask_probs[k] >= mask_threshold).sum()
+ mask_exists = mask_k_area > 0 and original_area > 0
+
+ # Eliminate disconnected tiny segments
+ if mask_exists:
+ area_ratio = mask_k_area / original_area
+ if not area_ratio.item() > overlap_mask_area_threshold:
+ mask_exists = False
+
+ return mask_exists, mask_k
+
+
+# Copied from transformers.models.detr.image_processing_detr.compute_segments
+def compute_segments(
+ mask_probs,
+ pred_scores,
+ pred_labels,
+ mask_threshold: float = 0.5,
+ overlap_mask_area_threshold: float = 0.8,
+ label_ids_to_fuse: Optional[Set[int]] = None,
+ target_size: Tuple[int, int] = None,
+):
+ height = mask_probs.shape[1] if target_size is None else target_size[0]
+ width = mask_probs.shape[2] if target_size is None else target_size[1]
+
+ segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
+ segments: List[Dict] = []
+
+ if target_size is not None:
+ mask_probs = nn.functional.interpolate(
+ mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
+ )[0]
+
+ current_segment_id = 0
+
+ # Weigh each mask by its prediction score
+ mask_probs *= pred_scores.view(-1, 1, 1)
+ mask_labels = mask_probs.argmax(0) # [height, width]
+
+ # Keep track of instances of each class
+ stuff_memory_list: Dict[str, int] = {}
+ for k in range(pred_labels.shape[0]):
+ pred_class = pred_labels[k].item()
+ should_fuse = pred_class in label_ids_to_fuse
+
+ # Check if mask exists and large enough to be a segment
+ mask_exists, mask_k = check_segment_validity(
+ mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
+ )
+
+ if mask_exists:
+ if pred_class in stuff_memory_list:
+ current_segment_id = stuff_memory_list[pred_class]
+ else:
+ current_segment_id += 1
+
+ # Add current object segment to final segmentation map
+ segmentation[mask_k] = current_segment_id
+ segment_score = round(pred_scores[k].item(), 6)
+ segments.append(
+ {
+ "id": current_segment_id,
+ "label_id": pred_class,
+ "was_fused": should_fuse,
+ "score": segment_score,
+ }
+ )
+ if should_fuse:
+ stuff_memory_list[pred_class] = current_segment_id
+
+ return segmentation, segments
+
+
+class YolosImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a YOLOS image processor.
+
+ Args:
+ format (`str`, *optional*, defaults to `"coco_detection"`):
+ Data format of the annotations. One of "coco_detection" or "coco_panoptic".
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
+ overridden by the `do_resize` parameter in the `preprocess` method.
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
+ Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
+ the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ Resampling filter to use if resizing the image.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
+ `do_rescale` parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
+ `preprocess` method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
+ Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
+ channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
+ Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
+ for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
+ method. If `True` will pad the images in the batch to the largest height and width in the batch.
+ Padding will be applied to the bottom and right of the image with zeros.
+ """
+
+ model_input_names = ["pixel_values", "pixel_mask"]
+
+ def __init__(
+ self,
+ format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Union[float, List[float]] = None,
+ image_std: Union[float, List[float]] = None,
+ do_convert_annotations: Optional[bool] = None,
+ do_pad: bool = True,
+ **kwargs,
+ ) -> None:
+ if "pad_and_return_pixel_mask" in kwargs:
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
+
+ if "max_size" in kwargs:
+ logger.warning_once(
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
+ "Please specify in `size['longest_edge'] instead`.",
+ )
+ max_size = kwargs.pop("max_size")
+ else:
+ max_size = None if size is None else 1333
+
+ size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
+
+ # Backwards compatibility
+ if do_convert_annotations is None:
+ do_convert_annotations = do_normalize
+
+ super().__init__(**kwargs)
+ self.format = format
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.do_convert_annotations = do_convert_annotations
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
+ self.do_pad = do_pad
+ self._valid_processor_keys = [
+ "images",
+ "annotations",
+ "return_segmentation_masks",
+ "masks_path",
+ "do_resize",
+ "size",
+ "resample",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "do_convert_annotations",
+ "do_pad",
+ "format",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
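+
+ # Minimal usage sketch (illustrative; assumes `image` is a PIL image):
+ #
+ #     processor = YolosImageProcessor()
+ #     inputs = processor(images=image, return_tensors="pt")
+ #     # inputs["pixel_values"] is a padded, normalized tensor ready for YolosForObjectDetection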
+
+ @classmethod
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->Yolos
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
+ """
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
+ created using from_dict and kwargs e.g. `YolosImageProcessor.from_pretrained(checkpoint, size=600,
+ max_size=800)`
+ """
+ image_processor_dict = image_processor_dict.copy()
+ if "max_size" in kwargs:
+ image_processor_dict["max_size"] = kwargs.pop("max_size")
+ if "pad_and_return_pixel_mask" in kwargs:
+ image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
+ return super().from_dict(image_processor_dict, **kwargs)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation
+ def prepare_annotation(
+ self,
+ image: np.ndarray,
+ target: Dict,
+ format: Optional[AnnotationFormat] = None,
+ return_segmentation_masks: bool = None,
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> Dict:
+ """
+ Prepare an annotation for feeding into the YOLOS model.
+ """
+ format = format if format is not None else self.format
+
+ if format == AnnotationFormat.COCO_DETECTION:
+ return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
+ target = prepare_coco_detection_annotation(
+ image, target, return_segmentation_masks, input_data_format=input_data_format
+ )
+ elif format == AnnotationFormat.COCO_PANOPTIC:
+ return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
+ target = prepare_coco_panoptic_annotation(
+ image,
+ target,
+ masks_path=masks_path,
+ return_masks=return_segmentation_masks,
+ input_data_format=input_data_format,
+ )
+ else:
+ raise ValueError(f"Format {format} is not supported.")
+ return target
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
+ def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
+ logger.warning_once(
+ "The `prepare` method is deprecated and will be removed in a v4.33. "
+ "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
+ "does not return the image anymore.",
+ )
+ target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
+ return image, target
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
+ def convert_coco_poly_to_mask(self, *args, **kwargs):
+ logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ")
+ return convert_coco_poly_to_mask(*args, **kwargs)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection with DETR->Yolos
+ def prepare_coco_detection(self, *args, **kwargs):
+ logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ")
+ return prepare_coco_detection_annotation(*args, **kwargs)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
+ def prepare_coco_panoptic(self, *args, **kwargs):
+ logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ")
+ return prepare_coco_panoptic_annotation(*args, **kwargs)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize the image to the given size. If `size` contains `shortest_edge` and `longest_edge`, the shorter edge
+ of the image is matched to `shortest_edge` while keeping the aspect ratio, without letting the longer edge
+ exceed `longest_edge`. If `size` contains `height` and `width`, the image is resized to exactly that size.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or
+ `height` and `width`.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ Resampling filter to use if resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ if "max_size" in kwargs:
+ logger.warning_once(
+ "The `max_size` parameter is deprecated and will be removed in v4.26. "
+ "Please specify in `size['longest_edge'] instead`.",
+ )
+ max_size = kwargs.pop("max_size")
+ else:
+ max_size = None
+ size = get_size_dict(size, max_size=max_size, default_to_square=False)
+ if "shortest_edge" in size and "longest_edge" in size:
+ size = get_resize_output_image_size(
+ image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
+ )
+ elif "height" in size and "width" in size:
+ size = (size["height"], size["width"])
+ else:
+ raise ValueError(
+ "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
+ f" {size.keys()}."
+ )
+ image = resize(
+ image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
+ )
+ return image
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
+ def resize_annotation(
+ self,
+ annotation,
+ orig_size,
+ size,
+ resample: PILImageResampling = PILImageResampling.NEAREST,
+ ) -> Dict:
+ """
+ Resize the annotation to match the resized image. If size is an int, the smaller edge of the mask will be
+ matched to this number.
+ """
+ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
+ def rescale(
+ self,
+ image: np.ndarray,
+ rescale_factor: float,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Rescale the image by the given factor. image = image * rescale_factor.
+
+ Args:
+ image (`np.ndarray`):
+ Image to rescale.
+ rescale_factor (`float`):
+ The value to use for rescaling.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format for the input image. If unset, is inferred from the input image. Can be
+ one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ """
+ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
+ def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
+ """
+ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
+ `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
+ """
+ return normalize_annotation(annotation, image_size=image_size)
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
+ def _update_annotation_for_padded_image(
+ self,
+ annotation: Dict,
+ input_image_size: Tuple[int, int],
+ output_image_size: Tuple[int, int],
+ padding,
+ update_bboxes,
+ ) -> Dict:
+ """
+ Update the annotation for a padded image.
+ """
+ new_annotation = {}
+ new_annotation["size"] = output_image_size
+
+ for key, value in annotation.items():
+ if key == "masks":
+ masks = value
+ masks = pad(
+ masks,
+ padding,
+ mode=PaddingMode.CONSTANT,
+ constant_values=0,
+ input_data_format=ChannelDimension.FIRST,
+ )
+ masks = safe_squeeze(masks, 1)
+ new_annotation["masks"] = masks
+ elif key == "boxes" and update_bboxes:
+ boxes = value
+ boxes *= np.asarray(
+ [
+ input_image_size[1] / output_image_size[1],
+ input_image_size[0] / output_image_size[0],
+ input_image_size[1] / output_image_size[1],
+ input_image_size[0] / output_image_size[0],
+ ]
+ )
+ new_annotation["boxes"] = boxes
+ elif key == "size":
+ new_annotation["size"] = output_image_size
+ else:
+ new_annotation[key] = value
+ return new_annotation
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
+ def _pad_image(
+ self,
+ image: np.ndarray,
+ output_size: Tuple[int, int],
+ annotation: Optional[Dict[str, Any]] = None,
+ constant_values: Union[float, Iterable[float]] = 0,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ update_bboxes: bool = True,
+ ) -> np.ndarray:
+ """
+ Pad an image with zeros to the given size.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ output_height, output_width = output_size
+
+ pad_bottom = output_height - input_height
+ pad_right = output_width - input_width
+ padding = ((0, pad_bottom), (0, pad_right))
+ padded_image = pad(
+ image,
+ padding,
+ mode=PaddingMode.CONSTANT,
+ constant_values=constant_values,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ if annotation is not None:
+ annotation = self._update_annotation_for_padded_image(
+ annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
+ )
+ return padded_image, annotation
+
+ def pad(
+ self,
+ images: List[np.ndarray],
+ annotations: Optional[List[Dict[str, Any]]] = None,
+ constant_values: Union[float, Iterable[float]] = 0,
+ return_pixel_mask: bool = False,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ update_bboxes: bool = True,
+ ) -> BatchFeature:
+ """
+ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
+ in the batch and optionally returns their corresponding pixel mask.
+
+ Args:
+ images (`List[np.ndarray]`):
+ Images to pad.
+ annotations (`List[Dict[str, any]]`, *optional*):
+ Annotations to pad along with the images. If provided, the bounding boxes will be updated to match the
+ padded images.
+ constant_values (`float` or `Iterable[float]`, *optional*):
+ The value to use for the padding if `mode` is `"constant"`.
+ return_pixel_mask (`bool`, *optional*, defaults to `False`):
+ Whether to return a pixel mask.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ update_bboxes (`bool`, *optional*, defaults to `True`):
+ Whether to update the bounding boxes in the annotations to match the padded images. If the
+ bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`
+ format, the bounding boxes will not be updated.
+ """
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
+
+ annotation_list = annotations if annotations is not None else [None] * len(images)
+ padded_images = []
+ padded_annotations = []
+ for image, annotation in zip(images, annotation_list):
+ padded_image, padded_annotation = self._pad_image(
+ image,
+ pad_size,
+ annotation,
+ constant_values=constant_values,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ update_bboxes=update_bboxes,
+ )
+ padded_images.append(padded_image)
+ padded_annotations.append(padded_annotation)
+
+ data = {"pixel_values": padded_images}
+
+ if return_pixel_mask:
+ masks = [
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
+ for image in images
+ ]
+ data["pixel_mask"] = masks
+
+ encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
+
+ if annotations is not None:
+ encoded_inputs["labels"] = [
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
+ ]
+
+ return encoded_inputs
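+
+ # Illustrative behaviour of `pad` (hypothetical shapes): given two channels-first images of sizes
+ # (3, 480, 640) and (3, 512, 600), both are padded to (3, 512, 640); with `return_pixel_mask=True`, each
+ # returned mask is a (512, 640) array of ones over the original extent and zeros over the padding.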
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
+ return_segmentation_masks: bool = None,
+ masks_path: Optional[Union[str, pathlib.Path]] = None,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ resample=None, # PILImageResampling
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[Union[int, float]] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_annotations: Optional[bool] = None,
+ do_pad: Optional[bool] = None,
+ format: Optional[Union[str, AnnotationFormat]] = None,
+ return_tensors: Optional[Union[TensorType, str]] = None,
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or a batch of images so that it can be used by the model.
+
+ Args:
+ images (`ImageInput`):
+ Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
+ from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
+ List of annotations associated with the image or batch of images. If annotation is for object
+ detection, the annotations should be a dictionary with the following keys:
+ - "image_id" (`int`): The image id.
+ - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
+ dictionary. An image can have no annotations, in which case the list should be empty.
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys:
+ - "image_id" (`int`): The image id.
+ - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
+ An image can have no segments, in which case the list should be empty.
+ - "file_name" (`str`): The file name of the image.
+ return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
+ Whether to return segmentation masks.
+ masks_path (`str` or `pathlib.Path`, *optional*):
+ Path to the directory containing the segmentation masks.
+ do_resize (`bool`, *optional*, defaults to self.do_resize):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to self.size):
+ Size of the image after resizing.
+ resample (`PILImageResampling`, *optional*, defaults to self.resample):
+ Resampling filter to use when resizing the image.
+ do_rescale (`bool`, *optional*, defaults to self.do_rescale):
+ Whether to rescale the image.
+ rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
+ Rescale factor to use when rescaling the image.
+ do_normalize (`bool`, *optional*, defaults to self.do_normalize):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
+ Mean to use when normalizing the image.
+ image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
+ Standard deviation to use when normalizing the image.
+ do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
+ Whether to convert the annotations to the format expected by the model. Converts the bounding
+ boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
+ and in relative coordinates.
+ do_pad (`bool`, *optional*, defaults to self.do_pad):
+ Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch
+ and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros.
+ format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
+ Format of the annotations.
+ return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
+ Type of tensors to return. If `None`, will return the list of images.
+ data_format (`str` or `ChannelDimension`, *optional*, defaults to self.data_format):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ if "pad_and_return_pixel_mask" in kwargs:
+ logger.warning_once(
+ "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in v4.33, "
+ "use `do_pad` instead.",
+ )
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
+
+ max_size = None
+ if "max_size" in kwargs:
+ logger.warning_once(
+ "The `max_size` argument is deprecated and will be removed in v4.33, use"
+ " `size['longest_edge']` instead.",
+ )
+ size = kwargs.pop("max_size")
+
+ do_resize = self.do_resize if do_resize is None else do_resize
+ size = self.size if size is None else size
+ size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
+ resample = self.resample if resample is None else resample
+ do_rescale = self.do_rescale if do_rescale is None else do_rescale
+ rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
+ do_normalize = self.do_normalize if do_normalize is None else do_normalize
+ image_mean = self.image_mean if image_mean is None else image_mean
+ image_std = self.image_std if image_std is None else image_std
+ do_convert_annotations = (
+ self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
+ )
+ do_pad = self.do_pad if do_pad is None else do_pad
+ format = self.format if format is None else format
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ # Here the pad() method pads using the max of (width, height) and does not need to be validated.
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ if annotations is not None and isinstance(annotations, dict):
+ annotations = [annotations]
+
+ if annotations is not None and len(images) != len(annotations):
+ raise ValueError(
+ f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
+ )
+
+ format = AnnotationFormat(format)
+ if annotations is not None:
+ validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
+
+ if (
+ masks_path is not None
+ and format == AnnotationFormat.COCO_PANOPTIC
+ and not isinstance(masks_path, (pathlib.Path, str))
+ ):
+ raise ValueError(
+ "The path to the directory containing the mask PNG files should be provided as a"
+ f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
+ )
+
+ # All transformations expect numpy arrays
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
+ if annotations is not None:
+ prepared_images = []
+ prepared_annotations = []
+ for image, target in zip(images, annotations):
+ target = self.prepare_annotation(
+ image,
+ target,
+ format,
+ return_segmentation_masks=return_segmentation_masks,
+ masks_path=masks_path,
+ input_data_format=input_data_format,
+ )
+ prepared_images.append(image)
+ prepared_annotations.append(target)
+ images = prepared_images
+ annotations = prepared_annotations
+ del prepared_images, prepared_annotations
+
+ # transformations
+ if do_resize:
+ if annotations is not None:
+ resized_images, resized_annotations = [], []
+ for image, target in zip(images, annotations):
+ orig_size = get_image_size(image, input_data_format)
+ resized_image = self.resize(
+ image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format
+ )
+ resized_annotation = self.resize_annotation(
+ target, orig_size, get_image_size(resized_image, input_data_format)
+ )
+ resized_images.append(resized_image)
+ resized_annotations.append(resized_annotation)
+ images = resized_images
+ annotations = resized_annotations
+ del resized_images, resized_annotations
+ else:
+ images = [
+ self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
+
+ if do_normalize:
+ images = [
+ self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
+ ]
+
+ if do_convert_annotations and annotations is not None:
+ annotations = [
+ self.normalize_annotation(annotation, get_image_size(image, input_data_format))
+ for annotation, image in zip(annotations, images)
+ ]
+
+ if do_pad:
+ # Pads images (and, if requested, the annotation boxes) to the largest height and width in the
+ # batch. `return_pixel_mask` is `False` here, so only `pixel_values` (and `labels`) are returned.
+ encoded_inputs = self.pad(
+ images,
+ annotations=annotations,
+ return_pixel_mask=False,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ update_bboxes=do_convert_annotations,
+ return_tensors=return_tensors,
+ )
+ else:
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ for image in images
+ ]
+ encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
+ if annotations is not None:
+ encoded_inputs["labels"] = [
+ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
+ ]
+
+ return encoded_inputs
+
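+ # Typical usage (sketch; the checkpoint and annotation contents are placeholders):
+ #
+ #     image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
+ #     target = {"image_id": 0, "annotations": []}  # COCO detection-style dict
+ #     inputs = image_processor(images=image, annotations=target, return_tensors="pt")
+ #     # -> inputs["pixel_values"], plus inputs["labels"] when annotations are passed
+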
+ # POSTPROCESSING METHODS - TODO: add support for other frameworks
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process with Detr->Yolos
+ def post_process(self, outputs, target_sizes):
+ """
+ Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
+ bottom_right_x, bottom_right_y) format. Only supports PyTorch.
+
+ Args:
+ outputs ([`YolosObjectDetectionOutput`]):
+ Raw outputs of the model.
+ target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
+ Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the
+ original image size (before any data augmentation). For visualization, this should be the image size
+ after data augmentation, but before padding.
+ Returns:
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
+ in the batch as predicted by the model.
+ """
+ logger.warning_once(
+ "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
+ " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
+ )
+
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
+
+ if len(out_logits) != len(target_sizes):
+ raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
+ if target_sizes.shape[1] != 2:
+ raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
+
+ prob = nn.functional.softmax(out_logits, -1)
+ scores, labels = prob[..., :-1].max(-1)
+
+ # convert to [x0, y0, x1, y1] format
+ boxes = center_to_corners_format(out_bbox)
+ # and from relative [0, 1] to absolute [0, height] coordinates
+ img_h, img_w = target_sizes.unbind(1)
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
+ boxes = boxes * scale_fct[:, None, :]
+
+ results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
+ return results
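+
+ # For intuition (toy numbers): `center_to_corners_format` maps a normalized box
+ # (center_x, center_y, width, height) = (0.5, 0.5, 0.2, 0.4) to corners
+ # (x0, y0, x1, y1) = (0.4, 0.3, 0.6, 0.7); multiplying by (img_w, img_h, img_w, img_h)
+ # then yields absolute coordinates for the original image size.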
+
+ # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_object_detection with Detr->Yolos
+ def post_process_object_detection(
+ self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None
+ ):
+ """
+ Converts the raw output of [`YolosForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
+ bottom_right_x, bottom_right_y) format. Only supports PyTorch.
+
+ Args:
+ outputs ([`YolosObjectDetectionOutput`]):
+ Raw outputs of the model.
+ threshold (`float`, *optional*):
+ Score threshold to keep object detection predictions.
+ target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
+ Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
+ `(height, width)` of each image in the batch. If unset, predictions will not be resized.
+ Returns:
+ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
+ in the batch as predicted by the model.
+ """
+ out_logits, out_bbox = outputs.logits, outputs.pred_boxes
+
+ if target_sizes is not None:
+ if len(out_logits) != len(target_sizes):
+ raise ValueError(
+ "Make sure that you pass in as many target sizes as the batch dimension of the logits"
+ )
+
+ prob = nn.functional.softmax(out_logits, -1)
+ scores, labels = prob[..., :-1].max(-1)
+
+ # Convert to [x0, y0, x1, y1] format
+ boxes = center_to_corners_format(out_bbox)
+
+ # Convert from relative [0, 1] to absolute [0, height] coordinates
+ if target_sizes is not None:
+ if isinstance(target_sizes, List):
+ img_h = torch.Tensor([i[0] for i in target_sizes])
+ img_w = torch.Tensor([i[1] for i in target_sizes])
+ else:
+ img_h, img_w = target_sizes.unbind(1)
+
+ scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
+ boxes = boxes * scale_fct[:, None, :]
+
+ results = []
+ for s, l, b in zip(scores, labels, boxes):
+ score = s[s > threshold]
+ label = l[s > threshold]
+ box = b[s > threshold]
+ results.append({"scores": score, "labels": label, "boxes": box})
+
+ return results
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/modeling_yolos.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/modeling_yolos.py
new file mode 100644
index 0000000000000000000000000000000000000000..f47b6b228f571eb59a08bfbb1ac34b7ab6df0bad
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/yolos/modeling_yolos.py
@@ -0,0 +1,1321 @@
+# coding=utf-8
+# Copyright 2022 School of EIC, Huazhong University of Science & Technology and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch YOLOS model."""
+
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_accelerate_available,
+ is_scipy_available,
+ is_vision_available,
+ logging,
+ replace_return_docstrings,
+ requires_backends,
+)
+from .configuration_yolos import YolosConfig
+
+
+if is_scipy_available():
+ from scipy.optimize import linear_sum_assignment
+
+if is_vision_available():
+ from transformers.image_transforms import center_to_corners_format
+
+if is_accelerate_available():
+ from accelerate import PartialState
+ from accelerate.utils import reduce
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "YolosConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "hustvl/yolos-small"
+_EXPECTED_OUTPUT_SHAPE = [1, 3401, 384]
+
+
+from ..deprecated._archive_maps import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class YolosObjectDetectionOutput(ModelOutput):
+ """
+ Output type of [`YolosForObjectDetection`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
+ Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
+ bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
+ scale-invariant IoU loss.
+ loss_dict (`Dict`, *optional*):
+ A dictionary containing the individual losses. Useful for logging.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
+ Classification logits (including no-object) for all queries.
+ pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
+ Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
+ values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
+ possible padding). You can use [`~YolosImageProcessor.post_process`] to retrieve the unnormalized bounding
+ boxes.
+ auxiliary_outputs (`list[Dict]`, *optional*):
+ Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
+ and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
+ `pred_boxes`) for each decoder layer.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
+ the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ loss_dict: Optional[Dict] = None
+ logits: torch.FloatTensor = None
+ pred_boxes: torch.FloatTensor = None
+ auxiliary_outputs: Optional[List[Dict]] = None
+ last_hidden_state: Optional[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class YolosEmbeddings(nn.Module):
+ """
+ Construct the CLS token, detection tokens, position and patch embeddings.
+ """
+
+ def __init__(self, config: YolosConfig) -> None:
+ super().__init__()
+
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ self.detection_tokens = nn.Parameter(torch.zeros(1, config.num_detection_tokens, config.hidden_size))
+ self.patch_embeddings = YolosPatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(
+ torch.zeros(1, num_patches + config.num_detection_tokens + 1, config.hidden_size)
+ )
+
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.interpolation = InterpolateInitialPositionEmbeddings(config)
+ self.config = config
+
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ embeddings = self.patch_embeddings(pixel_values)
+
+ batch_size, seq_len, _ = embeddings.size()
+
+ # add the [CLS] and detection tokens to the embedded patch tokens
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+ detection_tokens = self.detection_tokens.expand(batch_size, -1, -1)
+ embeddings = torch.cat((cls_tokens, embeddings, detection_tokens), dim=1)
+
+ # add positional encoding to each token
+ # this might require interpolation of the existing position embeddings
+ position_embeddings = self.interpolation(self.position_embeddings, (height, width))
+
+ embeddings = embeddings + position_embeddings
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
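+ # Resulting token layout (sketch): 1 [CLS] token + (height // patch_size) * (width // patch_size)
+ # patch tokens + num_detection_tokens detection tokens. For a hypothetical 512x512 input with
+ # 16x16 patches and the default 100 detection tokens this gives 1 + 1024 + 100 = 1125 positions,
+ # each of dimension hidden_size.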
+
+class InterpolateInitialPositionEmbeddings(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ self.config = config
+
+ def forward(self, pos_embed, img_size=(800, 1344)) -> torch.Tensor:
+ cls_pos_embed = pos_embed[:, 0, :]
+ cls_pos_embed = cls_pos_embed[:, None]
+ det_pos_embed = pos_embed[:, -self.config.num_detection_tokens :, :]
+ patch_pos_embed = pos_embed[:, 1 : -self.config.num_detection_tokens, :]
+ patch_pos_embed = patch_pos_embed.transpose(1, 2)
+ batch_size, hidden_size, seq_len = patch_pos_embed.shape
+
+ patch_height, patch_width = (
+ self.config.image_size[0] // self.config.patch_size,
+ self.config.image_size[1] // self.config.patch_size,
+ )
+ patch_pos_embed = patch_pos_embed.view(batch_size, hidden_size, patch_height, patch_width)
+
+ height, width = img_size
+ new_patch_height, new_patch_width = height // self.config.patch_size, width // self.config.patch_size
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed, size=(new_patch_height, new_patch_width), mode="bicubic", align_corners=False
+ )
+ patch_pos_embed = patch_pos_embed.flatten(2).transpose(1, 2)
+ scale_pos_embed = torch.cat((cls_pos_embed, patch_pos_embed, det_pos_embed), dim=1)
+ return scale_pos_embed
+
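+ # In short: the patch position embeddings (learned for `config.image_size`) are reshaped to their
+ # 2D grid, bicubically resized to the grid implied by the actual input size, and re-flattened,
+ # while the [CLS] and detection-token position embeddings are reused unchanged.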
+
+class InterpolateMidPositionEmbeddings(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ self.config = config
+
+ def forward(self, pos_embed, img_size=(800, 1344)) -> torch.Tensor:
+ cls_pos_embed = pos_embed[:, :, 0, :]
+ cls_pos_embed = cls_pos_embed[:, None]
+ det_pos_embed = pos_embed[:, :, -self.config.num_detection_tokens :, :]
+ patch_pos_embed = pos_embed[:, :, 1 : -self.config.num_detection_tokens, :]
+ patch_pos_embed = patch_pos_embed.transpose(2, 3)
+ depth, batch_size, hidden_size, seq_len = patch_pos_embed.shape
+
+ patch_height, patch_width = (
+ self.config.image_size[0] // self.config.patch_size,
+ self.config.image_size[1] // self.config.patch_size,
+ )
+ patch_pos_embed = patch_pos_embed.view(depth * batch_size, hidden_size, patch_height, patch_width)
+ height, width = img_size
+ new_patch_height, new_patch_width = height // self.config.patch_size, width // self.config.patch_size
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed, size=(new_patch_height, new_patch_width), mode="bicubic", align_corners=False
+ )
+ patch_pos_embed = (
+ patch_pos_embed.flatten(2)
+ .transpose(1, 2)
+ .contiguous()
+ .view(depth, batch_size, new_patch_height * new_patch_width, hidden_size)
+ )
+ scale_pos_embed = torch.cat((cls_pos_embed, patch_pos_embed, det_pos_embed), dim=2)
+ return scale_pos_embed
+
+
+class YolosPatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+
+ embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
+ return embeddings
+
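+ # Shape sketch: the strided Conv2d maps (batch_size, num_channels, height, width) to
+ # (batch_size, hidden_size, height // patch_size, width // patch_size); flatten(2).transpose(1, 2)
+ # then yields (batch_size, num_patches, hidden_size), e.g. a (1, 3, 224, 224) input with 16x16
+ # patches becomes (1, 196, hidden_size).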
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Yolos
+class YolosSelfAttention(nn.Module):
+ def __init__(self, config: YolosConfig) -> None:
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Yolos
+class YolosSelfOutput(nn.Module):
+ """
+ The residual connection is defined in YolosLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: YolosConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Yolos
+class YolosAttention(nn.Module):
+ def __init__(self, config: YolosConfig) -> None:
+ super().__init__()
+ self.attention = YolosSelfAttention(config)
+ self.output = YolosSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads: Set[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->Yolos
+class YolosIntermediate(nn.Module):
+ def __init__(self, config: YolosConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->Yolos
+class YolosOutput(nn.Module):
+ def __init__(self, config: YolosConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ hidden_states = hidden_states + input_tensor
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->Yolos
+class YolosLayer(nn.Module):
+ """This corresponds to the Block class in the timm implementation."""
+
+ def __init__(self, config: YolosConfig) -> None:
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = YolosAttention(config)
+ self.intermediate = YolosIntermediate(config)
+ self.output = YolosOutput(config)
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), # in Yolos, layernorm is applied before self-attention
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection
+ hidden_states = attention_output + hidden_states
+
+ # in Yolos, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+
+ # second residual connection is done here
+ layer_output = self.output(layer_output, hidden_states)
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+class YolosEncoder(nn.Module):
+ def __init__(self, config: YolosConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([YolosLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ seq_length = (
+ 1 + (config.image_size[0] * config.image_size[1] // config.patch_size**2) + config.num_detection_tokens
+ )
+ self.mid_position_embeddings = (
+ nn.Parameter(
+ torch.zeros(
+ config.num_hidden_layers - 1,
+ 1,
+ seq_length,
+ config.hidden_size,
+ )
+ )
+ if config.use_mid_position_embeddings
+ else None
+ )
+
+ self.interpolation = InterpolateMidPositionEmbeddings(config) if config.use_mid_position_embeddings else None
+
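+ # When `use_mid_position_embeddings` is enabled, the (num_hidden_layers - 1) mid position
+ # embeddings are interpolated once per forward pass to the actual input resolution, and the
+ # i-th one is added to the hidden states after every layer except the last.
+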
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ height,
+ width,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if self.config.use_mid_position_embeddings:
+ interpolated_mid_position_embeddings = self.interpolation(self.mid_position_embeddings, (height, width))
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if self.config.use_mid_position_embeddings:
+ if i < (self.config.num_hidden_layers - 1):
+ hidden_states = hidden_states + interpolated_mid_position_embeddings[i]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class YolosPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = YolosConfig
+ base_model_prefix = "vit"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+YOLOS_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`YolosConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+YOLOS_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`YolosImageProcessor.__call__`] for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare YOLOS Model transformer outputting raw hidden-states without any specific head on top.",
+ YOLOS_START_DOCSTRING,
+)
+class YolosModel(YolosPreTrainedModel):
+ def __init__(self, config: YolosConfig, add_pooling_layer: bool = True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = YolosEmbeddings(config)
+ self.encoder = YolosEncoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.pooler = YolosPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> YolosPatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
+ """
+ Prunes heads of the model.
+
+ Args:
+ heads_to_prune (`dict` of {layer_num: list of heads to prune in this layer}):
+ See base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(YOLOS_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ height=pixel_values.shape[-2],
+ width=pixel_values.shape[-1],
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
+ return head_outputs + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class YolosPooler(nn.Module):
+ def __init__(self, config: YolosConfig):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+@add_start_docstrings(
+ """
+ YOLOS Model (consisting of a ViT encoder) with object detection heads on top, for tasks such as COCO detection.
+ """,
+ YOLOS_START_DOCSTRING,
+)
+class YolosForObjectDetection(YolosPreTrainedModel):
+ def __init__(self, config: YolosConfig):
+ super().__init__(config)
+
+ # YOLOS (ViT) encoder model
+ self.vit = YolosModel(config, add_pooling_layer=False)
+
+ # Object detection heads
+ # We add one for the "no object" class
+ self.class_labels_classifier = YolosMLPPredictionHead(
+ input_dim=config.hidden_size, hidden_dim=config.hidden_size, output_dim=config.num_labels + 1, num_layers=3
+ )
+ self.bbox_predictor = YolosMLPPredictionHead(
+ input_dim=config.hidden_size, hidden_dim=config.hidden_size, output_dim=4, num_layers=3
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py
+ @torch.jit.unused
+ def _set_aux_loss(self, outputs_class, outputs_coord):
+ # this is a workaround to make torchscript happy, as torchscript
+ # doesn't support dictionaries with non-homogeneous values, such
+ # as a dict having both a Tensor and a list.
+ return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
+
+ @add_start_docstrings_to_model_forward(YOLOS_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=YolosObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ labels: Optional[List[Dict]] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, YolosObjectDetectionOutput]:
+ r"""
+ labels (`List[Dict]` of len `(batch_size,)`, *optional*):
+ Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
+ following 2 keys: `'class_labels'` and `'boxes'` (the class labels and bounding boxes of an image in the
+ batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding
+ boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image,
+ 4)`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-tiny")
+ >>> model = AutoModelForObjectDetection.from_pretrained("hustvl/yolos-tiny")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
+ >>> target_sizes = torch.tensor([image.size[::-1]])
+ >>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[
+ ... 0
+ ... ]
+
+ >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+ ... box = [round(i, 2) for i in box.tolist()]
+ ... print(
+ ... f"Detected {model.config.id2label[label.item()]} with confidence "
+ ... f"{round(score.item(), 3)} at location {box}"
+ ... )
+ Detected remote with confidence 0.991 at location [46.48, 72.78, 178.98, 119.3]
+ Detected remote with confidence 0.908 at location [336.48, 79.27, 368.23, 192.36]
+ Detected cat with confidence 0.934 at location [337.18, 18.06, 638.14, 373.09]
+ Detected cat with confidence 0.979 at location [10.93, 53.74, 313.41, 470.67]
+ Detected remote with confidence 0.974 at location [41.63, 72.23, 178.09, 119.99]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # First, send images through the YOLOS base model to obtain hidden states
+ outputs = self.vit(
+ pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ # Take the final hidden states of the detection tokens
+ sequence_output = sequence_output[:, -self.config.num_detection_tokens :, :]
+
+ # Class logits + predicted bounding boxes
+ logits = self.class_labels_classifier(sequence_output)
+ pred_boxes = self.bbox_predictor(sequence_output).sigmoid()
+
+ loss, loss_dict, auxiliary_outputs = None, None, None
+ if labels is not None:
+ # First: create the matcher
+ matcher = YolosHungarianMatcher(
+ class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost
+ )
+ # Second: create the criterion
+ losses = ["labels", "boxes", "cardinality"]
+ criterion = YolosLoss(
+ matcher=matcher,
+ num_classes=self.config.num_labels,
+ eos_coef=self.config.eos_coefficient,
+ losses=losses,
+ )
+ criterion.to(self.device)
+ # Third: compute the losses, based on outputs and labels
+ outputs_loss = {}
+ outputs_loss["logits"] = logits
+ outputs_loss["pred_boxes"] = pred_boxes
+ if self.config.auxiliary_loss:
+ intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4]
+ outputs_class = self.class_labels_classifier(intermediate)
+ outputs_coord = self.bbox_predictor(intermediate).sigmoid()
+ auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
+ outputs_loss["auxiliary_outputs"] = auxiliary_outputs
+
+ loss_dict = criterion(outputs_loss, labels)
+ # Fourth: compute total loss, as a weighted sum of the various losses
+ weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient}
+ weight_dict["loss_giou"] = self.config.giou_loss_coefficient
+ if self.config.auxiliary_loss:
+ aux_weight_dict = {}
+ for i in range(self.config.decoder_layers - 1):
+ aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
+ weight_dict.update(aux_weight_dict)
+ loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
+
+ if not return_dict:
+ if auxiliary_outputs is not None:
+ output = (logits, pred_boxes) + auxiliary_outputs + outputs
+ else:
+ output = (logits, pred_boxes) + outputs
+ return ((loss, loss_dict) + output) if loss is not None else output
+
+ return YolosObjectDetectionOutput(
+ loss=loss,
+ loss_dict=loss_dict,
+ logits=logits,
+ pred_boxes=pred_boxes,
+ auxiliary_outputs=auxiliary_outputs,
+ last_hidden_state=outputs.last_hidden_state,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
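+ # The total loss above is a weighted sum (sketch):
+ #     loss = 1.0 * loss_ce
+ #          + config.bbox_loss_coefficient * loss_bbox
+ #          + config.giou_loss_coefficient * loss_giou
+ # with the same weights repeated (suffixed per layer) for each auxiliary output when
+ # `config.auxiliary_loss` is enabled.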
+
+# Copied from transformers.models.detr.modeling_detr.dice_loss
+def dice_loss(inputs, targets, num_boxes):
+ """
+ Compute the DICE loss, similar to generalized IOU for masks
+
+ Args:
+ inputs: A float tensor of arbitrary shape.
+ The predictions for each example.
+ targets: A float tensor with the same shape as inputs. Stores the binary
+ classification label for each element in inputs (0 for the negative class and 1 for the positive
+ class).
+ """
+ inputs = inputs.sigmoid()
+ inputs = inputs.flatten(1)
+ numerator = 2 * (inputs * targets).sum(1)
+ denominator = inputs.sum(-1) + targets.sum(-1)
+ loss = 1 - (numerator + 1) / (denominator + 1)
+ return loss.sum() / num_boxes
+
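+ # Sanity check (toy case): if the sigmoided inputs exactly match binary targets with t positive
+ # pixels, numerator = 2t and denominator = 2t, so the per-mask loss is 1 - (2t + 1) / (2t + 1) = 0;
+ # a completely wrong mask gives a loss close to 1.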
+
+# Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss
+def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
+ """
+ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
+
+ Args:
+ inputs (`torch.FloatTensor` of arbitrary shape):
+ The predictions for each example.
+ targets (`torch.FloatTensor` with the same shape as `inputs`):
+ A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class
+ and 1 for the positive class).
+ alpha (`float`, *optional*, defaults to `0.25`):
+ Optional weighting factor in the range (0,1) to balance positive vs. negative examples.
+ gamma (`int`, *optional*, defaults to `2`):
+ Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples.
+
+ Returns:
+ Loss tensor
+ """
+ prob = inputs.sigmoid()
+ ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
+ # add modulating factor
+ p_t = prob * targets + (1 - prob) * (1 - targets)
+ loss = ce_loss * ((1 - p_t) ** gamma)
+
+ if alpha >= 0:
+ alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
+ loss = alpha_t * loss
+
+ return loss.mean(1).sum() / num_boxes
+
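+ # Intuition (toy numbers): with gamma = 2, a well-classified element with p_t = 0.9 has its
+ # cross-entropy term scaled by (1 - 0.9) ** 2 = 0.01, while a hard element with p_t = 0.5 is
+ # scaled by 0.25, i.e. 25x more; alpha weights positives by alpha and negatives by (1 - alpha).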
+
+# Copied from transformers.models.detr.modeling_detr.DetrLoss with Detr->Yolos
+class YolosLoss(nn.Module):
+ """
+ This class computes the losses for YolosForObjectDetection. The process happens in two steps: 1) we compute
+ a Hungarian assignment between the ground truth boxes and the outputs of the model, 2) we supervise each pair
+ of matched ground-truth / prediction (supervising both class and box).
+
+ A note on the `num_classes` argument (copied from original repo in detr.py): "the naming of the `num_classes`
+ parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is
+ the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass `num_classes` to
+ be 91. As another example, for a dataset that has a single class with `id` 1, you should pass `num_classes` to be 2
+ (`max_obj_id` + 1). For more details on this, check the following discussion
+ https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223"
+
+
+ Args:
+ matcher (`YolosHungarianMatcher`):
+ Module able to compute a matching between targets and proposals.
+ num_classes (`int`):
+ Number of object categories, omitting the special no-object category.
+ eos_coef (`float`):
+ Relative classification weight applied to the no-object category.
+ losses (`List[str]`):
+ List of all the losses to be applied. See `get_loss` for a list of all available losses.
+ """
+
+ def __init__(self, matcher, num_classes, eos_coef, losses):
+ super().__init__()
+ self.matcher = matcher
+ self.num_classes = num_classes
+ self.eos_coef = eos_coef
+ self.losses = losses
+ empty_weight = torch.ones(self.num_classes + 1)
+ empty_weight[-1] = self.eos_coef
+ self.register_buffer("empty_weight", empty_weight)
+
+ # removed logging parameter, which was part of the original implementation
+ def loss_labels(self, outputs, targets, indices, num_boxes):
+ """
+ Classification loss (NLL). Targets dicts must contain the key "class_labels" containing a tensor of dim
+ [nb_target_boxes].
+ """
+ if "logits" not in outputs:
+ raise KeyError("No logits were found in the outputs")
+ source_logits = outputs["logits"]
+
+ idx = self._get_source_permutation_idx(indices)
+ target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
+ target_classes = torch.full(
+ source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
+ )
+ target_classes[idx] = target_classes_o
+
+ loss_ce = nn.functional.cross_entropy(source_logits.transpose(1, 2), target_classes, self.empty_weight)
+ losses = {"loss_ce": loss_ce}
+
+ return losses
+
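+ # Illustration (assuming the COCO setup described in the class docstring, num_classes = 91, and
+ # the default 100 detection tokens): `target_classes` starts as a (batch_size, 100) tensor filled
+ # with 91, the no-object index; positions matched by the Hungarian matcher are then overwritten
+ # with their ground-truth class ids before the class-weighted cross-entropy is computed.
+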
+ @torch.no_grad()
+ def loss_cardinality(self, outputs, targets, indices, num_boxes):
+ """
+ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.
+
+ This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients.
+ """
+ logits = outputs["logits"]
+ device = logits.device
+ target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)
+ # Count the number of predictions that are NOT "no-object" (which is the last class)
+ card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
+ card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
+ losses = {"cardinality_error": card_err}
+ return losses
+
+ def loss_boxes(self, outputs, targets, indices, num_boxes):
+ """
+ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.
+
+ Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes
+ are expected in format (center_x, center_y, w, h), normalized by the image size.
+ """
+ if "pred_boxes" not in outputs:
+ raise KeyError("No predicted boxes found in outputs")
+ idx = self._get_source_permutation_idx(indices)
+ source_boxes = outputs["pred_boxes"][idx]
+ target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
+
+ loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none")
+
+ losses = {}
+ losses["loss_bbox"] = loss_bbox.sum() / num_boxes
+
+ loss_giou = 1 - torch.diag(
+ generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))
+ )
+ losses["loss_giou"] = loss_giou.sum() / num_boxes
+ return losses
+
+ def loss_masks(self, outputs, targets, indices, num_boxes):
+ """
+ Compute the losses related to the masks: the focal loss and the dice loss.
+
+ Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w].
+ """
+ if "pred_masks" not in outputs:
+ raise KeyError("No predicted masks found in outputs")
+
+ source_idx = self._get_source_permutation_idx(indices)
+ target_idx = self._get_target_permutation_idx(indices)
+ source_masks = outputs["pred_masks"]
+ source_masks = source_masks[source_idx]
+ masks = [t["masks"] for t in targets]
+ # TODO use valid to mask invalid areas due to padding in loss
+ target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
+ target_masks = target_masks.to(source_masks)
+ target_masks = target_masks[target_idx]
+
+ # upsample predictions to the target size
+ source_masks = nn.functional.interpolate(
+ source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False
+ )
+ source_masks = source_masks[:, 0].flatten(1)
+
+ target_masks = target_masks.flatten(1)
+ target_masks = target_masks.view(source_masks.shape)
+ losses = {
+ "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes),
+ "loss_dice": dice_loss(source_masks, target_masks, num_boxes),
+ }
+ return losses
+
+ def _get_source_permutation_idx(self, indices):
+ # permute predictions following indices
+ batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)])
+ source_idx = torch.cat([source for (source, _) in indices])
+ return batch_idx, source_idx
+
+ def _get_target_permutation_idx(self, indices):
+ # permute targets following indices
+ batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)])
+ target_idx = torch.cat([target for (_, target) in indices])
+ return batch_idx, target_idx
+
+ def get_loss(self, loss, outputs, targets, indices, num_boxes):
+ loss_map = {
+ "labels": self.loss_labels,
+ "cardinality": self.loss_cardinality,
+ "boxes": self.loss_boxes,
+ "masks": self.loss_masks,
+ }
+ if loss not in loss_map:
+ raise ValueError(f"Loss {loss} not supported")
+ return loss_map[loss](outputs, targets, indices, num_boxes)
+
+ def forward(self, outputs, targets):
+ """
+ This performs the loss computation.
+
+ Args:
+ outputs (`dict`, *optional*):
+ Dictionary of tensors, see the output specification of the model for the format.
+ targets (`List[dict]`, *optional*):
+ List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the
+ losses applied, see each loss' doc.
+ """
+ outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"}
+
+ # Retrieve the matching between the outputs of the last layer and the targets
+ indices = self.matcher(outputs_without_aux, targets)
+
+ # Compute the average number of target boxes across all nodes, for normalization purposes
+ num_boxes = sum(len(t["class_labels"]) for t in targets)
+ num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
+ world_size = 1
+ if is_accelerate_available():
+ if PartialState._shared_state != {}:
+ num_boxes = reduce(num_boxes)
+ world_size = PartialState().num_processes
+ num_boxes = torch.clamp(num_boxes / world_size, min=1).item()
+
+ # Compute all the requested losses
+ losses = {}
+ for loss in self.losses:
+ losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
+
+ # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
+ if "auxiliary_outputs" in outputs:
+ for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
+ indices = self.matcher(auxiliary_outputs, targets)
+ for loss in self.losses:
+ if loss == "masks":
+ # Intermediate mask losses are too costly to compute, so we ignore them.
+ continue
+ l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
+ l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
+ losses.update(l_dict)
+
+ return losses
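+ # Hedged usage sketch (argument values and variable names below are hypothetical; the
+ # constructor signature is assumed to follow the DETR-style loss class this is copied from):
+ #   >>> criterion = YolosLoss(matcher=matcher, num_classes=91, eos_coef=0.1, losses=["labels", "boxes", "cardinality"])
+ #   >>> outputs = {"logits": logits, "pred_boxes": pred_boxes}
+ #   >>> targets = [{"class_labels": labels_0, "boxes": boxes_0}, {"class_labels": labels_1, "boxes": boxes_1}]
+ #   >>> loss_dict = criterion(outputs, targets)  # e.g. {"loss_ce": ..., "cardinality_error": ..., "loss_bbox": ..., "loss_giou": ...}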
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->Yolos
+class YolosMLPPredictionHead(nn.Module):
+ """
+ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
+ height and width of a bounding box w.r.t. an image.
+
+ Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
+
+ """
+
+ def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
+ super().__init__()
+ self.num_layers = num_layers
+ h = [hidden_dim] * (num_layers - 1)
+ self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+
+ def forward(self, x):
+ for i, layer in enumerate(self.layers):
+ x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+ return x
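+ # Illustrative sketch (hypothetical dimensions): with num_layers=3 the head stacks
+ # Linear(input_dim, hidden_dim) -> Linear(hidden_dim, hidden_dim) -> Linear(hidden_dim, output_dim),
+ # applying ReLU after every layer except the last:
+ #   >>> head = YolosMLPPredictionHead(input_dim=768, hidden_dim=256, output_dim=4, num_layers=3)
+ #   >>> head(torch.randn(2, 100, 768)).shape  # torch.Size([2, 100, 4])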
+
+
+# Copied from transformers.models.detr.modeling_detr.DetrHungarianMatcher with Detr->Yolos
+class YolosHungarianMatcher(nn.Module):
+ """
+ This class computes an assignment between the targets and the predictions of the network.
+
+ For efficiency reasons, the targets don't include the no_object class. Because of this, in general, there are
+ more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others
+ are unmatched (and thus treated as non-objects).
+
+ Args:
+ class_cost:
+ The relative weight of the classification error in the matching cost.
+ bbox_cost:
+ The relative weight of the L1 error of the bounding box coordinates in the matching cost.
+ giou_cost:
+ The relative weight of the giou loss of the bounding box in the matching cost.
+ """
+
+ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1):
+ super().__init__()
+ requires_backends(self, ["scipy"])
+
+ self.class_cost = class_cost
+ self.bbox_cost = bbox_cost
+ self.giou_cost = giou_cost
+ if class_cost == 0 and bbox_cost == 0 and giou_cost == 0:
+ raise ValueError("All costs of the Matcher can't be 0")
+
+ @torch.no_grad()
+ def forward(self, outputs, targets):
+ """
+ Args:
+ outputs (`dict`):
+ A dictionary that contains at least these entries:
+ * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
+ * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates.
+ targets (`List[dict]`):
+ A list of targets (len(targets) = batch_size), where each target is a dict containing:
+ * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of
+ ground-truth
+ objects in the target) containing the class labels
+ * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.
+
+ Returns:
+ `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where:
+ - index_i is the indices of the selected predictions (in order)
+ - index_j is the indices of the corresponding selected targets (in order)
+ For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
+ """
+ batch_size, num_queries = outputs["logits"].shape[:2]
+
+ # We flatten to compute the cost matrices in a batch
+ out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
+ out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
+
+ # Also concat the target labels and boxes
+ target_ids = torch.cat([v["class_labels"] for v in targets])
+ target_bbox = torch.cat([v["boxes"] for v in targets])
+
+ # Compute the classification cost. Contrary to the loss, we don't use the NLL,
+ # but approximate it by 1 - proba[target class].
+ # The 1 is a constant that doesn't change the matching, so it can be omitted.
+ class_cost = -out_prob[:, target_ids]
+
+ # Compute the L1 cost between boxes
+ bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
+
+ # Compute the giou cost between boxes
+ giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
+
+ # Final cost matrix
+ cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
+ cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
+
+ sizes = [len(v["boxes"]) for v in targets]
+ indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
+ return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
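+ # Worked example of the assignment step (hypothetical cost matrix): for one image with
+ # 3 queries and 2 targets,
+ #   >>> linear_sum_assignment([[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]])  # (array([0, 1]), array([1, 0]))
+ # i.e. query 0 is matched to target 1, query 1 to target 0, and query 2 is left unmatched.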
+
+
+# Copied from transformers.models.detr.modeling_detr._upcast
+def _upcast(t: Tensor) -> Tensor:
+ # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
+ if t.is_floating_point():
+ return t if t.dtype in (torch.float32, torch.float64) else t.float()
+ else:
+ return t if t.dtype in (torch.int32, torch.int64) else t.int()
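+ # Illustrative examples: a float16 tensor is upcast to float32 and an int16 tensor to int32,
+ # while float32/float64 and int32/int64 tensors are returned unchanged:
+ #   >>> _upcast(torch.ones(1, dtype=torch.float16)).dtype  # torch.float32
+ #   >>> _upcast(torch.ones(1, dtype=torch.int16)).dtype    # torch.int32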
+
+
+# Copied from transformers.models.detr.modeling_detr.box_area
+def box_area(boxes: Tensor) -> Tensor:
+ """
+ Computes the area of a set of bounding boxes, which are specified by their (x1, y1, x2, y2) coordinates.
+
+ Args:
+ boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
+ Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
+ < x2` and `0 <= y1 < y2`.
+
+ Returns:
+ `torch.FloatTensor`: a tensor containing the area for each box.
+ """
+ boxes = _upcast(boxes)
+ return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
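+ # Worked example: box_area(torch.tensor([[0.0, 0.0, 2.0, 3.0]])) returns tensor([6.]),
+ # i.e. (x2 - x1) * (y2 - y1).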
+
+
+# Copied from transformers.models.detr.modeling_detr.box_iou
+def box_iou(boxes1, boxes2):
+ area1 = box_area(boxes1)
+ area2 = box_area(boxes2)
+
+ left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
+ right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
+
+ width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2]
+ inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M]
+
+ union = area1[:, None] + area2 - inter
+
+ iou = inter / union
+ return iou, union
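+ # Worked example (corner format): two unit squares overlapping on half their area,
+ #   >>> a = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
+ #   >>> b = torch.tensor([[0.5, 0.0, 1.5, 1.0]])
+ #   >>> box_iou(a, b)  # (tensor([[0.3333]]), tensor([[1.5000]])), i.e. IoU = 0.5 / 1.5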
+
+
+# Copied from transformers.models.detr.modeling_detr.generalized_box_iou
+def generalized_box_iou(boxes1, boxes2):
+ """
+ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.
+
+ Returns:
+ `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
+ """
+ # degenerate boxes give inf / nan results,
+ # so do an early check
+ if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
+ raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
+ if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
+ raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")
+ iou, union = box_iou(boxes1, boxes2)
+
+ top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
+ bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
+
+ width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2]
+ area = width_height[:, :, 0] * width_height[:, :, 1]
+
+ return iou - (area - union) / area
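+ # Worked example: for two disjoint unit squares, the IoU is 0 but the GIoU still reflects
+ # their distance via the smallest enclosing box:
+ #   >>> a = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
+ #   >>> b = torch.tensor([[2.0, 0.0, 3.0, 1.0]])
+ #   >>> generalized_box_iou(a, b)  # tensor([[-0.3333]]) = 0 - (3 - 2) / 3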
+
+
+# Copied from transformers.models.detr.modeling_detr._max_by_axis
+def _max_by_axis(the_list):
+ # type: (List[List[int]]) -> List[int]
+ maxes = the_list[0]
+ for sublist in the_list[1:]:
+ for index, item in enumerate(sublist):
+ maxes[index] = max(maxes[index], item)
+ return maxes
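+ # Illustrative example: _max_by_axis([[3, 224, 196], [3, 180, 224]]) returns [3, 224, 224],
+ # the element-wise maximum over the per-image shapes.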
+
+
+# Copied from transformers.models.detr.modeling_detr.NestedTensor
+class NestedTensor(object):
+ def __init__(self, tensors, mask: Optional[Tensor]):
+ self.tensors = tensors
+ self.mask = mask
+
+ def to(self, device):
+ cast_tensor = self.tensors.to(device)
+ mask = self.mask
+ if mask is not None:
+ cast_mask = mask.to(device)
+ else:
+ cast_mask = None
+ return NestedTensor(cast_tensor, cast_mask)
+
+ def decompose(self):
+ return self.tensors, self.mask
+
+ def __repr__(self):
+ return str(self.tensors)
+
+
+# Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list
+def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
+ if tensor_list[0].ndim == 3:
+ max_size = _max_by_axis([list(img.shape) for img in tensor_list])
+ batch_shape = [len(tensor_list)] + max_size
+ batch_size, num_channels, height, width = batch_shape
+ dtype = tensor_list[0].dtype
+ device = tensor_list[0].device
+ tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
+ mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
+ for img, pad_img, m in zip(tensor_list, tensor, mask):
+ pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
+ m[: img.shape[1], : img.shape[2]] = False
+ else:
+ raise ValueError("Only 3-dimensional tensors are supported")
+ return NestedTensor(tensor, mask)
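+ # Hedged usage sketch (hypothetical image sizes): two images of different resolutions are
+ # zero-padded to a common (3, 224, 224) canvas; `mask` is True on padded pixels:
+ #   >>> images = [torch.rand(3, 224, 196), torch.rand(3, 180, 224)]
+ #   >>> nested = nested_tensor_from_tensor_list(images)
+ #   >>> nested.tensors.shape, nested.mask.shape  # (torch.Size([2, 3, 224, 224]), torch.Size([2, 224, 224]))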