diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2a484b9b8285083ba958772743207d64a8403bc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py
@@ -0,0 +1,97 @@
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module while preserving other warnings, so don't check this module at all.
+
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+    is_tf_available,
+    is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_convnextv2": [
+ "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "ConvNextV2Config",
+ ]
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_convnextv2"] = [
+ "CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ConvNextV2ForImageClassification",
+ "ConvNextV2Model",
+ "ConvNextV2PreTrainedModel",
+ "ConvNextV2Backbone",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_convnextv2"] = [
+ "TFConvNextV2ForImageClassification",
+ "TFConvNextV2Model",
+ "TFConvNextV2PreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_convnextv2 import (
+ CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ ConvNextV2Config,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_convnextv2 import (
+ CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ConvNextV2Backbone,
+ ConvNextV2ForImageClassification,
+ ConvNextV2Model,
+ ConvNextV2PreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_convnextv2 import (
+ TFConvNextV2ForImageClassification,
+ TFConvNextV2Model,
+ TFConvNextV2PreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
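The file above only registers import paths; the `_LazyModule` assigned to `sys.modules` defers the heavy torch/TF imports until an attribute is first accessed. A minimal illustrative sketch (not part of the patch; assumes the package and PyTorch are installed):

```python
from transformers.models import convnextv2

config_cls = convnextv2.ConvNextV2Config  # lazily imports configuration_convnextv2
print(config_cls.model_type)              # "convnextv2"

model_cls = convnextv2.ConvNextV2Model    # lazily imports modeling_convnextv2 (needs torch)
print(model_cls.base_model_prefix)        # "convnextv2"
```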
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d7d1fa7397714868567b935198e83ddb9c296c8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py
@@ -0,0 +1,118 @@
+# coding=utf-8
+# Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" ConvNeXTV2 model configuration"""
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
+}
+
+
+class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate a
+ ConvNeXTV2 model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the ConvNeXTV2
+ [facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+        patch_size (`int`, *optional*, defaults to 4):
+            Patch size to use in the patch embedding layer.
+        num_stages (`int`, *optional*, defaults to 4):
+            The number of stages in the model.
+ hidden_sizes (`List[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
+ Dimensionality (hidden size) at each stage.
+ depths (`List[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
+ Depth (number of blocks) for each stage.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
+ `"selu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+        drop_path_rate (`float`, *optional*, defaults to 0.0):
+            The drop rate for stochastic depth.
+        image_size (`int`, *optional*, defaults to 224):
+            The size (resolution) of each image.
+ out_features (`List[str]`, *optional*):
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ out_indices (`List[int]`, *optional*):
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+
+ Example:
+ ```python
+    >>> from transformers import ConvNextV2Config, ConvNextV2Model
+
+    >>> # Initializing a ConvNeXTV2 convnextv2-tiny-1k-224 style configuration
+    >>> configuration = ConvNextV2Config()
+
+ >>> # Initializing a model (with random weights) from the convnextv2-tiny-1k-224 style configuration
+ >>> model = ConvNextV2Model(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "convnextv2"
+
+ def __init__(
+ self,
+ num_channels=3,
+ patch_size=4,
+ num_stages=4,
+ hidden_sizes=None,
+ depths=None,
+ hidden_act="gelu",
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ drop_path_rate=0.0,
+ image_size=224,
+ out_features=None,
+ out_indices=None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.num_channels = num_channels
+ self.patch_size = patch_size
+ self.num_stages = num_stages
+ self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
+ self.depths = [3, 3, 9, 3] if depths is None else depths
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.drop_path_rate = drop_path_rate
+ self.image_size = image_size
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+ )
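A short sketch of how `out_features`/`out_indices` are aligned against `stage_names` by `get_aligned_output_features_output_indices` when this configuration backs a backbone (illustrative, not part of the patch; exact container types may differ between `transformers` versions):

```python
from transformers import ConvNextV2Config

# Default depths [3, 3, 9, 3] give stage_names ["stem", "stage1", "stage2", "stage3", "stage4"].
config = ConvNextV2Config(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # the positions of those stages in stage_names: 2 and 4
```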
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..8094ecf0d6157a1bb2343817f7e9303f622d9102
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py
@@ -0,0 +1,286 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert ConvNeXTV2 checkpoints from the original repository.
+
+URL: https://github.com/facebookresearch/ConvNeXt"""
+
+import argparse
+import json
+import os
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import ConvNextImageProcessor, ConvNextV2Config, ConvNextV2ForImageClassification
+from transformers.image_utils import PILImageResampling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_convnextv2_config(checkpoint_url):
+ config = ConvNextV2Config()
+
+ if "atto" in checkpoint_url:
+ depths = [2, 2, 6, 2]
+ hidden_sizes = [40, 80, 160, 320]
+ if "femto" in checkpoint_url:
+ depths = [2, 2, 6, 2]
+ hidden_sizes = [48, 96, 192, 384]
+ if "pico" in checkpoint_url:
+ depths = [2, 2, 6, 2]
+ hidden_sizes = [64, 128, 256, 512]
+ if "nano" in checkpoint_url:
+ depths = [2, 2, 8, 2]
+ hidden_sizes = [80, 160, 320, 640]
+ if "tiny" in checkpoint_url:
+ depths = [3, 3, 9, 3]
+ hidden_sizes = [96, 192, 384, 768]
+ if "base" in checkpoint_url:
+ depths = [3, 3, 27, 3]
+ hidden_sizes = [128, 256, 512, 1024]
+ if "large" in checkpoint_url:
+ depths = [3, 3, 27, 3]
+ hidden_sizes = [192, 384, 768, 1536]
+ if "huge" in checkpoint_url:
+ depths = [3, 3, 27, 3]
+ hidden_sizes = [352, 704, 1408, 2816]
+
+ num_labels = 1000
+ filename = "imagenet-1k-id2label.json"
+ expected_shape = (1, 1000)
+
+ repo_id = "huggingface/label-files"
+ config.num_labels = num_labels
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ config.hidden_sizes = hidden_sizes
+ config.depths = depths
+
+ return config, expected_shape
+
+
+def rename_key(name):
+ if "downsample_layers.0.0" in name:
+ name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
+ if "downsample_layers.0.1" in name:
+ name = name.replace("downsample_layers.0.1", "embeddings.norm") # we rename to layernorm later on
+ if "downsample_layers.1.0" in name:
+ name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0")
+ if "downsample_layers.1.1" in name:
+ name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1")
+ if "downsample_layers.2.0" in name:
+ name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0")
+ if "downsample_layers.2.1" in name:
+ name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1")
+ if "downsample_layers.3.0" in name:
+ name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0")
+ if "downsample_layers.3.1" in name:
+ name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1")
+ if "stages" in name and "downsampling_layer" not in name:
+ # stages.0.0. for instance should be renamed to stages.0.layers.0.
+ name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :]
+ if "gamma" in name:
+ name = name.replace("gamma", "weight")
+ if "beta" in name:
+ name = name.replace("beta", "bias")
+ if "stages" in name:
+ name = name.replace("stages", "encoder.stages")
+ if "norm" in name:
+ name = name.replace("norm", "layernorm")
+ if "head" in name:
+ name = name.replace("head", "classifier")
+
+ return name
+
+
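To make the renaming above concrete, a few illustrative calls (not part of the patch; runnable with the script's dependencies installed):

```python
from transformers.models.convnextv2.convert_convnextv2_to_pytorch import rename_key

print(rename_key("downsample_layers.0.0.weight"))  # embeddings.patch_embeddings.weight
print(rename_key("downsample_layers.0.1.bias"))    # embeddings.layernorm.bias
print(rename_key("stages.2.1.dwconv.weight"))      # encoder.stages.2.layers.1.dwconv.weight
print(rename_key("norm.weight"))                   # layernorm.weight
print(rename_key("head.bias"))                     # classifier.bias
```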
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+def convert_preprocessor(checkpoint_url):
+ if "224" in checkpoint_url:
+ size = 224
+ crop_pct = 224 / 256
+ elif "384" in checkpoint_url:
+ size = 384
+ crop_pct = None
+ else:
+ size = 512
+ crop_pct = None
+
+ return ConvNextImageProcessor(
+ size=size,
+ crop_pct=crop_pct,
+ image_mean=[0.485, 0.456, 0.406],
+ image_std=[0.229, 0.224, 0.225],
+ resample=PILImageResampling.BICUBIC,
+ )
+
+
+@torch.no_grad()
+def convert_convnextv2_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub):
+ """
+ Copy/paste/tweak model's weights to our ConvNeXTV2 structure.
+ """
+ print("Downloading original model from checkpoint...")
+ # define ConvNeXTV2 configuration based on URL
+ config, expected_shape = get_convnextv2_config(checkpoint_url)
+ # load original state_dict from URL
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
+
+ print("Converting model parameters...")
+ # rename keys
+ for key in state_dict.copy().keys():
+ val = state_dict.pop(key)
+ state_dict[rename_key(key)] = val
+    # add prefix to all keys except the classifier head
+ for key in state_dict.copy().keys():
+ val = state_dict.pop(key)
+ if not key.startswith("classifier"):
+ key = "convnextv2." + key
+ state_dict[key] = val
+
+ # load HuggingFace model
+ model = ConvNextV2ForImageClassification(config)
+ model.load_state_dict(state_dict)
+ model.eval()
+
+ # Check outputs on an image, prepared by ConvNextImageProcessor
+ preprocessor = convert_preprocessor(checkpoint_url)
+ inputs = preprocessor(images=prepare_img(), return_tensors="pt")
+ logits = model(**inputs).logits
+
+ # note: the logits below were obtained without center cropping
+ if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt":
+ expected_logits = torch.tensor([-0.3930, 0.1747, -0.5246, 0.4177, 0.4295])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt":
+ expected_logits = torch.tensor([-0.1727, -0.5341, -0.7818, -0.4745, -0.6566])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt":
+ expected_logits = torch.tensor([-0.0333, 0.1563, -0.9137, 0.1054, 0.0381])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt":
+ expected_logits = torch.tensor([-0.1744, -0.1555, -0.0713, 0.0950, -0.1431])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt":
+ expected_logits = torch.tensor([0.9996, 0.1966, -0.4386, -0.3472, 0.6661])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt":
+ expected_logits = torch.tensor([-0.2553, -0.6708, -0.1359, 0.2518, -0.2488])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt":
+ expected_logits = torch.tensor([-0.0673, -0.5627, -0.3753, -0.2722, 0.0178])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt":
+ expected_logits = torch.tensor([-0.6377, -0.7458, -0.2150, 0.1184, -0.0597])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt":
+ expected_logits = torch.tensor([1.0799, 0.2322, -0.8860, 1.0219, 0.6231])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt":
+ expected_logits = torch.tensor([0.3766, 0.4917, -1.1426, 0.9942, 0.6024])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt":
+ expected_logits = torch.tensor([0.4220, -0.6919, -0.4317, -0.2881, -0.6609])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt":
+ expected_logits = torch.tensor([0.1082, -0.8286, -0.5095, 0.4681, -0.8085])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt":
+ expected_logits = torch.tensor([-0.2419, -0.6221, 0.2176, -0.0980, -0.7527])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt":
+ expected_logits = torch.tensor([0.0391, -0.4371, 0.3786, 0.1251, -0.2784])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt":
+ expected_logits = torch.tensor([-0.0504, 0.5636, -0.1729, -0.6507, -0.3949])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt":
+ expected_logits = torch.tensor([0.3560, 0.9486, 0.3149, -0.2667, -0.5138])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt":
+ expected_logits = torch.tensor([-0.2469, -0.4550, -0.5853, -0.0810, 0.0309])
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt":
+ expected_logits = torch.tensor([-0.3090, 0.0802, -0.0682, -0.1979, -0.2826])
+ else:
+ raise ValueError(f"Unknown URL: {checkpoint_url}")
+
+ assert torch.allclose(logits[0, :5], expected_logits, atol=1e-3)
+ assert logits.shape == expected_shape
+ print("Model outputs match the original results!")
+
+ if save_model:
+ print("Saving model to local...")
+ # Create folder to save model
+ if not os.path.isdir(pytorch_dump_folder_path):
+ os.mkdir(pytorch_dump_folder_path)
+
+ model.save_pretrained(pytorch_dump_folder_path)
+ preprocessor.save_pretrained(pytorch_dump_folder_path)
+
+ model_name = "convnextv2"
+ if "atto" in checkpoint_url:
+ model_name += "-atto"
+ if "femto" in checkpoint_url:
+ model_name += "-femto"
+ if "pico" in checkpoint_url:
+ model_name += "-pico"
+ if "nano" in checkpoint_url:
+ model_name += "-nano"
+ elif "tiny" in checkpoint_url:
+ model_name += "-tiny"
+ elif "base" in checkpoint_url:
+ model_name += "-base"
+ elif "large" in checkpoint_url:
+ model_name += "-large"
+ elif "huge" in checkpoint_url:
+ model_name += "-huge"
+ if "22k" in checkpoint_url and "1k" not in checkpoint_url:
+ model_name += "-22k"
+ elif "22k" in checkpoint_url and "1k" in checkpoint_url:
+ model_name += "-22k-1k"
+ elif "1k" in checkpoint_url:
+ model_name += "-1k"
+ if "224" in checkpoint_url:
+ model_name += "-224"
+ elif "384" in checkpoint_url:
+ model_name += "-384"
+ elif "512" in checkpoint_url:
+ model_name += "-512"
+
+ if push_to_hub:
+ print(f"Pushing {model_name} to the hub...")
+ model.push_to_hub(model_name)
+ preprocessor.push_to_hub(model_name)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt",
+ type=str,
+ help="URL of the original ConvNeXTV2 checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default="model",
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument("--save_model", action="store_true", help="Save model to local")
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub")
+
+ args = parser.parse_args()
+ convert_convnextv2_checkpoint(
+ args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub
+ )
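The same conversion can be driven from Python instead of the CLI; a hedged sketch (not part of the patch) using one of the checkpoint URLs the script recognizes, which needs PyTorch and network access:

```python
from transformers.models.convnextv2.convert_convnextv2_to_pytorch import convert_convnextv2_checkpoint

convert_convnextv2_checkpoint(
    checkpoint_url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt",
    pytorch_dump_folder_path="convnextv2-atto-1k-224",
    save_model=True,
    push_to_hub=False,
)
```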
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d166200d12253c9992cde427dcc23d88e527826
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py
@@ -0,0 +1,576 @@
+# coding=utf-8
+# Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch ConvNextV2 model."""
+
+
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BackboneOutput,
+ BaseModelOutputWithNoAttention,
+ BaseModelOutputWithPoolingAndNoAttention,
+ ImageClassifierOutputWithNoAttention,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.backbone_utils import BackboneMixin
+from .configuration_convnextv2 import ConvNextV2Config
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "ConvNextV2Config"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
+_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "facebook/convnextv2-tiny-1k-224",
+ # See all ConvNextV2 models at https://huggingface.co/models?filter=convnextv2
+]
+
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
+
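A minimal sketch of the per-sample behaviour of `drop_path` above: in training mode each sample in the batch is either zeroed entirely or rescaled by `1/keep_prob`, so the expected value is preserved; in eval mode the function is a no-op (illustrative only, not part of the patch):

```python
import torch

from transformers.models.convnextv2.modeling_convnextv2 import drop_path

x = torch.ones(8, 3, 4, 4)
y = drop_path(x, drop_prob=0.5, training=True)
# Each sample is either all zeros or all 2.0 (= 1 / keep_prob), chosen at random per call.
print(y.reshape(8, -1)[:, 0])
print(torch.equal(drop_path(x, drop_prob=0.5, training=False), x))  # True: identity in eval mode
```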
+# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNextV2
+class ConvNextV2DropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class ConvNextV2GRN(nn.Module):
+ """GRN (Global Response Normalization) layer"""
+
+ def __init__(self, dim: int):
+ super().__init__()
+ self.weight = nn.Parameter(torch.zeros(1, 1, 1, dim))
+ self.bias = nn.Parameter(torch.zeros(1, 1, 1, dim))
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+ # Compute and normalize global spatial feature maps
+ global_features = torch.norm(hidden_states, p=2, dim=(1, 2), keepdim=True)
+ norm_features = global_features / (global_features.mean(dim=-1, keepdim=True) + 1e-6)
+ hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
+
+ return hidden_states
+
+
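A sketch of what `ConvNextV2GRN` computes on an NHWC tensor: the per-channel L2 norm over the spatial dimensions, divided by its mean across channels, gates the input. Because `weight` and `bias` are zero-initialized, the layer starts out as an identity (illustrative only, not part of the patch):

```python
import torch

from transformers.models.convnextv2.modeling_convnextv2 import ConvNextV2GRN

grn = ConvNextV2GRN(dim=16)
x = torch.randn(2, 7, 7, 16)      # (batch, height, width, channels)
print(grn(x).shape)               # torch.Size([2, 7, 7, 16])
print(torch.allclose(grn(x), x))  # True at initialization (zero weight and bias)
```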
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->ConvNextV2
+class ConvNextV2LayerNorm(nn.Module):
+ r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
+ width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
+ """
+
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
+ self.eps = eps
+ self.data_format = data_format
+ if self.data_format not in ["channels_last", "channels_first"]:
+ raise NotImplementedError(f"Unsupported data format: {self.data_format}")
+ self.normalized_shape = (normalized_shape,)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ if self.data_format == "channels_last":
+ x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+ elif self.data_format == "channels_first":
+ input_dtype = x.dtype
+ x = x.float()
+ u = x.mean(1, keepdim=True)
+ s = (x - u).pow(2).mean(1, keepdim=True)
+ x = (x - u) / torch.sqrt(s + self.eps)
+ x = x.to(dtype=input_dtype)
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
+ return x
+
+
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextEmbeddings with ConvNext->ConvNextV2
+class ConvNextV2Embeddings(nn.Module):
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
+ found in src/transformers/models/swin/modeling_swin.py.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.patch_embeddings = nn.Conv2d(
+ config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
+ )
+ self.layernorm = ConvNextV2LayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
+ self.num_channels = config.num_channels
+
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
+ num_channels = pixel_values.shape[1]
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ embeddings = self.patch_embeddings(pixel_values)
+ embeddings = self.layernorm(embeddings)
+ return embeddings
+
+
+class ConvNextV2Layer(nn.Module):
+ """This corresponds to the `Block` class in the original implementation.
+
+    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv], all in
+    (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear], Permute back.
+
+ The authors used (2) as they find it slightly faster in PyTorch.
+
+ Args:
+ config ([`ConvNextV2Config`]): Model configuration class.
+ dim (`int`): Number of input channels.
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
+ """
+
+ def __init__(self, config, dim, drop_path=0):
+ super().__init__()
+ # depthwise conv
+ self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
+ self.layernorm = ConvNextV2LayerNorm(dim, eps=1e-6)
+ # pointwise/1x1 convs, implemented with linear layers
+ self.pwconv1 = nn.Linear(dim, 4 * dim)
+ self.act = ACT2FN[config.hidden_act]
+ self.grn = ConvNextV2GRN(4 * dim)
+ self.pwconv2 = nn.Linear(4 * dim, dim)
+ self.drop_path = ConvNextV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
+ input = hidden_states
+ x = self.dwconv(hidden_states)
+ # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
+ x = x.permute(0, 2, 3, 1)
+ x = self.layernorm(x)
+ x = self.pwconv1(x)
+ x = self.act(x)
+ x = self.grn(x)
+ x = self.pwconv2(x)
+ # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
+ x = x.permute(0, 3, 1, 2)
+
+ x = input + self.drop_path(x)
+ return x
+
+
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextStage with ConvNeXT->ConvNeXTV2, ConvNext->ConvNextV2
+class ConvNextV2Stage(nn.Module):
+ """ConvNeXTV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
+
+ Args:
+ config ([`ConvNextV2Config`]): Model configuration class.
+ in_channels (`int`): Number of input channels.
+ out_channels (`int`): Number of output channels.
+ depth (`int`): Number of residual blocks.
+        drop_path_rates (`List[float]`): Stochastic depth rates for each layer.
+ """
+
+ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
+ super().__init__()
+
+ if in_channels != out_channels or stride > 1:
+ self.downsampling_layer = nn.Sequential(
+ ConvNextV2LayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
+ nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
+ )
+ else:
+ self.downsampling_layer = nn.Identity()
+ drop_path_rates = drop_path_rates or [0.0] * depth
+ self.layers = nn.Sequential(
+ *[ConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
+ )
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
+ hidden_states = self.downsampling_layer(hidden_states)
+ hidden_states = self.layers(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextEncoder with ConvNext->ConvNextV2
+class ConvNextV2Encoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.stages = nn.ModuleList()
+ drop_path_rates = [
+ x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
+ ]
+ prev_chs = config.hidden_sizes[0]
+ for i in range(config.num_stages):
+ out_chs = config.hidden_sizes[i]
+ stage = ConvNextV2Stage(
+ config,
+ in_channels=prev_chs,
+ out_channels=out_chs,
+ stride=2 if i > 0 else 1,
+ depth=config.depths[i],
+ drop_path_rates=drop_path_rates[i],
+ )
+ self.stages.append(stage)
+ prev_chs = out_chs
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, layer_module in enumerate(self.stages):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ hidden_states = layer_module(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
+
+ return BaseModelOutputWithNoAttention(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ )
+
+
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextPreTrainedModel with ConvNext->ConvNextV2, convnext->convnextv2
+class ConvNextV2PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ConvNextV2Config
+ base_model_prefix = "convnextv2"
+ main_input_name = "pixel_values"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+CONVNEXTV2_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+CONVNEXTV2_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`ConvNextImageProcessor`]. See
+ [`ConvNextImageProcessor.__call__`] for details.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ConvNextV2 model outputting raw features without any specific head on top.",
+ CONVNEXTV2_START_DOCSTRING,
+)
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextModel with CONVNEXT->CONVNEXTV2, ConvNext->ConvNextV2
+class ConvNextV2Model(ConvNextV2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = ConvNextV2Embeddings(config)
+ self.encoder = ConvNextV2Encoder(config)
+
+ # final layernorm layer
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ embedding_output = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ # global average pooling, (N, C, H, W) -> (N, C)
+ pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndNoAttention(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """
+ ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+ ImageNet.
+ """,
+ CONVNEXTV2_START_DOCSTRING,
+)
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextForImageClassification with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,convnext->convnextv2
+class ConvNextV2ForImageClassification(ConvNextV2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.convnextv2 = ConvNextV2Model(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutputWithNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.convnextv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
+
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutputWithNoAttention(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """
+ ConvNeXT V2 backbone, to be used with frameworks like DETR and MaskFormer.
+ """,
+ CONVNEXTV2_START_DOCSTRING,
+)
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextBackbone with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,facebook/convnext-tiny-224->facebook/convnextv2-tiny-1k-224
+class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin):
+ def __init__(self, config):
+ super().__init__(config)
+ super()._init_backbone(config)
+
+ self.embeddings = ConvNextV2Embeddings(config)
+ self.encoder = ConvNextV2Encoder(config)
+ self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
+
+ # Add layer norms to hidden states of out_features
+ hidden_states_norms = {}
+ for stage, num_channels in zip(self._out_features, self.channels):
+ hidden_states_norms[stage] = ConvNextV2LayerNorm(num_channels, data_format="channels_first")
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
+
+ # initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
+ >>> model = AutoBackbone.from_pretrained("facebook/convnextv2-tiny-1k-224")
+
+ >>> inputs = processor(image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ embedding_output = self.embeddings(pixel_values)
+
+ outputs = self.encoder(
+ embedding_output,
+ output_hidden_states=True,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ feature_maps = ()
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
+ if stage in self.out_features:
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
+ feature_maps += (hidden_state,)
+
+ if not return_dict:
+ output = (feature_maps,)
+ if output_hidden_states:
+ output += (hidden_states,)
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=hidden_states if output_hidden_states else None,
+ attentions=None,
+ )
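A short end-to-end usage sketch for the classification model defined in this file, using the `facebook/convnextv2-tiny-1k-224` checkpoint referenced in the docstrings above (not part of the patch; requires network access):

```python
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits

print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"
```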
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4bef6f161d2bf9c5f148741712a2df379315c6e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py
@@ -0,0 +1,686 @@
+# coding=utf-8
+# Copyright 2023 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 ConvNextV2 model."""
+
+
+from __future__ import annotations
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutputWithNoAttention,
+ TFBaseModelOutputWithPooling,
+ TFBaseModelOutputWithPoolingAndNoAttention,
+ TFImageClassifierOutputWithNoAttention,
+)
+from ...modeling_tf_utils import (
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import shape_list
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_convnextv2 import ConvNextV2Config
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "ConvNextV2Config"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
+_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "facebook/convnextv2-tiny-1k-224",
+ # See all ConvNextV2 models at https://huggingface.co/models?filter=convnextv2
+]
+
+
+# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->ConvNextV2
+class TFConvNextV2DropPath(keras.layers.Layer):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+ References:
+ (1) github.com:rwightman/pytorch-image-models
+ """
+
+ def __init__(self, drop_path: float, **kwargs):
+ super().__init__(**kwargs)
+ self.drop_path = drop_path
+
+ def call(self, x: tf.Tensor, training=None):
+ if training:
+ keep_prob = 1 - self.drop_path
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
+ random_tensor = tf.floor(random_tensor)
+ return (x / keep_prob) * random_tensor
+ return x
+
+
+class TFConvNextV2GRN(keras.layers.Layer):
+ """GRN (Global Response Normalization) layer"""
+
+ def __init__(self, config: ConvNextV2Config, dim: int, **kwargs):
+ super().__init__(**kwargs)
+ self.dim = dim
+
+ def build(self, input_shape: tf.TensorShape = None):
+ # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
+ self.weight = self.add_weight(
+ name="weight",
+ shape=(1, 1, 1, self.dim),
+ initializer=keras.initializers.Zeros(),
+ )
+ self.bias = self.add_weight(
+ name="bias",
+ shape=(1, 1, 1, self.dim),
+ initializer=keras.initializers.Zeros(),
+ )
+ return super().build(input_shape)
+
+ def call(self, hidden_states: tf.Tensor):
+ global_features = tf.norm(hidden_states, ord="euclidean", axis=(1, 2), keepdims=True)
+ norm_features = global_features / (tf.reduce_mean(global_features, axis=-1, keepdims=True) + 1e-6)
+ hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
+ return hidden_states
+
+
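As with the PyTorch GRN earlier in this diff, a sketch (not part of the patch) showing that the TF layer is an identity map right after its zero-initialized weights are built on the first call; it requires TensorFlow, and the `config` argument is accepted but not used by the layer itself:

```python
import tensorflow as tf

from transformers import ConvNextV2Config
from transformers.models.convnextv2.modeling_tf_convnextv2 import TFConvNextV2GRN

grn = TFConvNextV2GRN(ConvNextV2Config(), dim=16)
x = tf.random.normal((2, 7, 7, 16))  # NHWC, matching the TF model's internal layout
y = grn(x)                           # the first call builds the zero weight and bias
print(bool(tf.reduce_all(y == x)))   # True at initialization
```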
+# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextEmbeddings with ConvNext->ConvNextV2
+class TFConvNextV2Embeddings(keras.layers.Layer):
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
+ found in src/transformers/models/swin/modeling_swin.py.
+ """
+
+ def __init__(self, config: ConvNextV2Config, **kwargs):
+ super().__init__(**kwargs)
+ self.patch_embeddings = keras.layers.Conv2D(
+ filters=config.hidden_sizes[0],
+ kernel_size=config.patch_size,
+ strides=config.patch_size,
+ name="patch_embeddings",
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer=keras.initializers.Zeros(),
+ )
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
+ self.num_channels = config.num_channels
+ self.config = config
+
+ def call(self, pixel_values):
+ if isinstance(pixel_values, dict):
+ pixel_values = pixel_values["pixel_values"]
+
+ tf.debugging.assert_equal(
+ shape_list(pixel_values)[1],
+ self.num_channels,
+ message="Make sure that the channel dimension of the pixel values match with the one set in the configuration.",
+ )
+
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
+ # So change the input format from `NCHW` to `NHWC`.
+ # shape = (batch_size, in_height, in_width, in_channels)
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
+
+ embeddings = self.patch_embeddings(pixel_values)
+ embeddings = self.layernorm(embeddings)
+ return embeddings
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "patch_embeddings", None) is not None:
+ with tf.name_scope(self.patch_embeddings.name):
+ self.patch_embeddings.build([None, None, None, self.config.num_channels])
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
+
+
+class TFConvNextV2Layer(keras.layers.Layer):
+ """This corresponds to the `Block` class in the original implementation.
+
+    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv], all in
+    (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear], Permute back.
+
+ The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
+ NHWC ordering, we can just apply the operations straight-away without the permutation.
+
+ Args:
+ config (`ConvNextV2Config`):
+ Model configuration class.
+ dim (`int`):
+ Number of input channels.
+ drop_path (`float`, defaults to 0.0):
+ Stochastic depth rate.
+ """
+
+ def __init__(self, config: ConvNextV2Config, dim: int, drop_path: float = 0.0, **kwargs):
+ super().__init__(**kwargs)
+ self.dim = dim
+ self.config = config
+ self.dwconv = keras.layers.Conv2D(
+ filters=dim,
+ kernel_size=7,
+ padding="same",
+ groups=dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer=keras.initializers.Zeros(),
+ name="dwconv",
+ ) # depthwise conv
+ self.layernorm = keras.layers.LayerNormalization(
+ epsilon=1e-6,
+ name="layernorm",
+ )
+ self.pwconv1 = keras.layers.Dense(
+ units=4 * dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer=keras.initializers.Zeros(),
+ name="pwconv1",
+ ) # pointwise/1x1 convs, implemented with linear layers
+ self.act = get_tf_activation(config.hidden_act)
+ self.grn = TFConvNextV2GRN(config, 4 * dim, dtype=tf.float32, name="grn")
+ self.pwconv2 = keras.layers.Dense(
+ units=dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer=keras.initializers.Zeros(),
+ name="pwconv2",
+ )
+ # Using `layers.Activation` instead of `tf.identity` to better control `training`
+ # behaviour.
+ self.drop_path = (
+ TFConvNextV2DropPath(drop_path, name="drop_path")
+ if drop_path > 0.0
+ else keras.layers.Activation("linear", name="drop_path")
+ )
+
+ def call(self, hidden_states, training=False):
+ input = hidden_states
+ x = self.dwconv(hidden_states)
+ x = self.layernorm(x)
+ x = self.pwconv1(x)
+ x = self.act(x)
+ x = self.grn(x)
+ x = self.pwconv2(x)
+ x = self.drop_path(x, training=training)
+ x = input + x
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dwconv", None) is not None:
+ with tf.name_scope(self.dwconv.name):
+ self.dwconv.build([None, None, None, self.dim])
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, None, self.dim])
+ if getattr(self, "pwconv1", None) is not None:
+ with tf.name_scope(self.pwconv1.name):
+ self.pwconv1.build([None, None, self.dim])
+ if getattr(self, "grn", None) is not None:
+ with tf.name_scope(self.grn.name):
+ self.grn.build(None)
+ if getattr(self, "pwconv2", None) is not None:
+ with tf.name_scope(self.pwconv2.name):
+ self.pwconv2.build([None, None, 4 * self.dim])
+ if getattr(self, "drop_path", None) is not None:
+ with tf.name_scope(self.drop_path.name):
+ self.drop_path.build(None)
+
+
+# Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextStage with ConvNext->ConvNextV2
+class TFConvNextV2Stage(keras.layers.Layer):
+ """ConvNextV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
+
+ Args:
+        config (`ConvNextV2Config`):
+ Model configuration class.
+ in_channels (`int`):
+ Number of input channels.
+ out_channels (`int`):
+ Number of output channels.
+ depth (`int`):
+ Number of residual blocks.
+        drop_path_rates (`List[float]`):
+ Stochastic depth rates for each layer.
+ """
+
+ def __init__(
+ self,
+ config: ConvNextV2Config,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: int = 2,
+ stride: int = 2,
+ depth: int = 2,
+ drop_path_rates: Optional[List[float]] = None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ if in_channels != out_channels or stride > 1:
+ self.downsampling_layer = [
+ keras.layers.LayerNormalization(
+ epsilon=1e-6,
+ name="downsampling_layer.0",
+ ),
+ # Inputs to this layer will follow NHWC format since we
+ # transposed the inputs from NCHW to NHWC in the `TFConvNextV2Embeddings`
+ # layer. All the outputs throughout the model will be in NHWC
+ # from this point on until the output where we again change to
+ # NCHW.
+ keras.layers.Conv2D(
+ filters=out_channels,
+ kernel_size=kernel_size,
+ strides=stride,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer=keras.initializers.Zeros(),
+ name="downsampling_layer.1",
+ ),
+ ]
+ else:
+ self.downsampling_layer = [tf.identity]
+
+ drop_path_rates = drop_path_rates or [0.0] * depth
+ self.layers = [
+ TFConvNextV2Layer(
+ config,
+ dim=out_channels,
+ drop_path=drop_path_rates[j],
+ name=f"layers.{j}",
+ )
+ for j in range(depth)
+ ]
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.stride = stride
+
+ def call(self, hidden_states):
+ for layer in self.downsampling_layer:
+ hidden_states = layer(hidden_states)
+ for layer in self.layers:
+ hidden_states = layer(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+ if self.in_channels != self.out_channels or self.stride > 1:
+ with tf.name_scope(self.downsampling_layer[0].name):
+ self.downsampling_layer[0].build([None, None, None, self.in_channels])
+ with tf.name_scope(self.downsampling_layer[1].name):
+ self.downsampling_layer[1].build([None, None, None, self.in_channels])
+
+
+class TFConvNextV2Encoder(keras.layers.Layer):
+ def __init__(self, config: ConvNextV2Config, **kwargs):
+ super().__init__(**kwargs)
+ self.stages = []
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
+ drop_path_rates = tf.split(drop_path_rates, config.depths)
+ drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
+ prev_chs = config.hidden_sizes[0]
+ for i in range(config.num_stages):
+ out_chs = config.hidden_sizes[i]
+ stage = TFConvNextV2Stage(
+ config,
+ in_channels=prev_chs,
+ out_channels=out_chs,
+ stride=2 if i > 0 else 1,
+ depth=config.depths[i],
+ drop_path_rates=drop_path_rates[i],
+ name=f"stages.{i}",
+ )
+ self.stages.append(stage)
+ prev_chs = out_chs
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, TFBaseModelOutputWithNoAttention]:
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, layer_module in enumerate(self.stages):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ hidden_states = layer_module(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
+
+ return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
+
+ def build(self, input_shape=None):
+ for stage in self.stages:
+ with tf.name_scope(stage.name):
+ stage.build(None)
+
+
+@keras_serializable
+class TFConvNextV2MainLayer(keras.layers.Layer):
+ config_class = ConvNextV2Config
+
+ def __init__(self, config: ConvNextV2Config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.embeddings = TFConvNextV2Embeddings(config, name="embeddings")
+ self.encoder = TFConvNextV2Encoder(config, name="encoder")
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+ # We are setting the `data_format` like so because from here on we will revert to the
+ # NCHW output format
+ self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_last")
+
+ @unpack_inputs
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ embedding_output = self.embeddings(pixel_values, training=training)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ # Change to NCHW output format to have uniformity in the modules
+ pooled_output = self.pooler(last_hidden_state)
+ last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
+ pooled_output = self.layernorm(pooled_output)
+
+ # Change the other hidden state outputs to NCHW as well
+ if output_hidden_states:
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
+
+ if not return_dict:
+ hidden_states = hidden_states if output_hidden_states else ()
+ return (last_hidden_state, pooled_output) + hidden_states
+
+ return TFBaseModelOutputWithPoolingAndNoAttention(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, self.config.hidden_sizes[-1]])
+
+
+class TFConvNextV2PreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ConvNextV2Config
+ base_model_prefix = "convnextv2"
+ main_input_name = "pixel_values"
+
+
+CONVNEXTV2_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+ <Tip>
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
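+
+ For example, the following calls are equivalent ways of passing the same input (an illustrative sketch, where
+ `model` is any model instantiated from this class and `pixel_values` is a prepared `tf.Tensor`):
+
+ ```python
+ outputs = model(pixel_values)  # a single tensor as the first positional argument
+ outputs = model({"pixel_values": pixel_values})  # a dict keyed by the input name
+ outputs = model(pixel_values=pixel_values)  # keyword arguments
+ ```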
+
+ </Tip>
+
+ Parameters:
+ config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+CONVNEXTV2_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`ConvNextImageProcessor.__call__`] for details.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to `True`.
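+
+ As an illustration, `pixel_values` can be prepared with an image processor (a minimal sketch; the checkpoint
+ name is only an example, any ConvNeXt V2 checkpoint with an image processor works):
+
+ ```python
+ from transformers import AutoImageProcessor
+
+ image_processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
+ inputs = image_processor(images=image, return_tensors="tf")  # `image` is e.g. a PIL image
+ pixel_values = inputs["pixel_values"]
+ ```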
+"""
+
+
+@add_start_docstrings(
+ "The bare ConvNextV2 model outputting raw features without any specific head on top.",
+ CONVNEXTV2_START_DOCSTRING,
+)
+class TFConvNextV2Model(TFConvNextV2PreTrainedModel):
+ def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPoolingAndNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ outputs = self.convnextv2(
+ pixel_values=pixel_values,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return outputs[:]
+
+ return TFBaseModelOutputWithPoolingAndNoAttention(
+ last_hidden_state=outputs.last_hidden_state,
+ pooler_output=outputs.pooler_output,
+ hidden_states=outputs.hidden_states,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "convnextv2", None) is not None:
+ with tf.name_scope(self.convnextv2.name):
+ self.convnextv2.build(None)
+
+
+@add_start_docstrings(
+ """
+ ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+ ImageNet.
+ """,
+ CONVNEXTV2_START_DOCSTRING,
+)
+class TFConvNextV2ForImageClassification(TFConvNextV2PreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
+
+ # Classifier head
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ bias_initializer=keras.initializers.Zeros(),
+ name="classifier",
+ )
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=TFImageClassifierOutputWithNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ outputs = self.convnextv2(
+ pixel_values,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(pooled_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFImageClassifierOutputWithNoAttention(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "convnextv2", None) is not None:
+ with tf.name_scope(self.convnextv2.name):
+ self.convnextv2.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c3931276288b5e8357db9d0a24efd1ca47b6def
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b72061536d1e641a299bef9faa17b911b625ed91
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..479df14a03e530652b8ed6b788425d13d739e7eb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5386ed390bce03e777bc8aea0b5d0325c257f1a5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c5b767a00716f470141ed26e0db3a04c3b2972d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c13163787b2df1cf15bc88949c4804cc2e0f17b9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/configuration_ernie.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7c08f0e27e747ea5468e0f9f014df4225dbd424
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__init__.py
@@ -0,0 +1,43 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
+
+
+_import_structure = {}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
+
+
+if TYPE_CHECKING:
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_gpt_sw3 import GPTSw3Tokenizer
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8522b8ecf945025adfe2e9a1428b0a4bd5ba72a8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/convert_megatron_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/convert_megatron_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f2282ac51529d4bcb8512ea5f9f7a1df00860eb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/convert_megatron_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a61c251887eb32471917640275f3ecae2811d32
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/__pycache__/tokenization_gpt_sw3.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..5562efa287475be8786c28845124795951f6bfa6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py
@@ -0,0 +1,197 @@
+# Copyright 2022 The HuggingFace Inc. team and the AI-Sweden team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Convert GPT-SW3 megatron checkpoints to pytorch"""
+
+import argparse
+import os
+from os.path import isfile
+
+import torch
+
+from transformers import GPT2Config
+
+
+def recursive_print(name, val, spaces=0):
+ # Format the message.
+ if name is None:
+ msg = None
+ else:
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
+ msg = fmt.format(name)
+
+ # Print and recurse (if needed).
+ if isinstance(val, dict):
+ if msg is not None:
+ print(msg)
+ for k in val.keys():
+ recursive_print(k, val[k], spaces + 2)
+ elif isinstance(val, torch.Tensor):
+ print(msg, ":", val.size())
+ else:
+ print(msg, ":", val)
+
+
+def fix_query_key_value_ordering(param, num_splits, num_heads, hidden_size):
+ # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
+ # for compatibility with later versions of NVIDIA Megatron-LM.
+ # The inverse operation is performed inside Megatron-LM to read checkpoints:
+ # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
+ # If param is the weight tensor of the self-attention block, the returned tensor
+ # will have to be transposed one more time to be read by HuggingFace GPT2.
+ input_shape = param.size()
+ # other versions store [num_heads * num_splits * hidden_size, :]
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
+ param = param.view(*saved_shape)
+ param = param.transpose(0, 1).contiguous()
+ param = param.view(*input_shape)
+ return param
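+
+
+# Illustrative sanity check for the re-ordering above (an assumption for clarity; not used by the converter):
+#   param = torch.arange(24).reshape(24, 1).float()  # e.g. num_heads=2, num_splits=3, hidden_size=4
+#   out = fix_query_key_value_ordering(param, num_splits=3, num_heads=2, hidden_size=4)
+#   assert out.shape == param.shape  # the (heads, splits) grouping is swapped to (splits, heads)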
+
+
+def convert_megatron_checkpoint(sd_megatron, config):
+ """
+ Converts a Megatron checkpoint to a HuggingFace GPT-SW3 checkpoint.
+ """
+ n_positions = config.n_positions
+ layers = config.n_layer
+ vocab_size = config.vocab_size
+ heads = config.n_head
+ hidden_size_per_head = config.n_embd // config.n_head
+
+ word_embeddings = sd_megatron["model.language_model.embedding.word_embeddings.weight"][:vocab_size, :]
+ sd_hf = {
+ "transformer.wte.weight": word_embeddings,
+ "transformer.wpe.weight": sd_megatron["model.language_model.embedding.position_embeddings.weight"],
+ "transformer.ln_f.weight": sd_megatron["model.language_model.encoder.final_layernorm.weight"],
+ "transformer.ln_f.bias": sd_megatron["model.language_model.encoder.final_layernorm.bias"],
+ }
+
+ pf = "model.language_model.encoder.layers."
+ for i in range(layers):
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.bool))
+ causal_mask = causal_mask.view(1, 1, n_positions, n_positions)
+ sd_hf[f"transformer.h.{i}.attn.bias"] = causal_mask
+ sd_hf[f"transformer.h.{i}.attn.masked_bias"] = torch.tensor(-1e4, dtype=torch.bfloat16)
+
+ sd_hf[f"transformer.h.{i}.ln_1.weight"] = sd_megatron[f"{pf}{i}.input_layernorm.weight"]
+ sd_hf[f"transformer.h.{i}.ln_1.bias"] = sd_megatron[f"{pf}{i}.input_layernorm.bias"]
+
+ val1 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.weight"]
+ val1 = fix_query_key_value_ordering(val1, 3, heads, hidden_size_per_head)
+ sd_hf[f"transformer.h.{i}.attn.c_attn.weight"] = val1.transpose(0, 1).contiguous()
+
+ val2 = sd_megatron[f"{pf}{i}.self_attention.query_key_value.bias"]
+ val2 = fix_query_key_value_ordering(val2, 3, heads, hidden_size_per_head)
+ sd_hf[f"transformer.h.{i}.attn.c_attn.bias"] = val2
+
+ sd_hf[f"transformer.h.{i}.attn.c_proj.weight"] = sd_megatron[f"{pf}{i}.self_attention.dense.weight"].transpose(
+ 0, 1
+ )
+ sd_hf[f"transformer.h.{i}.attn.c_proj.bias"] = sd_megatron[f"{pf}{i}.self_attention.dense.bias"]
+ sd_hf[f"transformer.h.{i}.ln_2.weight"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.weight"]
+ sd_hf[f"transformer.h.{i}.ln_2.bias"] = sd_megatron[f"{pf}{i}.post_attention_layernorm.bias"]
+ sd_hf[f"transformer.h.{i}.mlp.c_fc.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.weight"].transpose(0, 1)
+ sd_hf[f"transformer.h.{i}.mlp.c_fc.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_h_to_4h.bias"]
+ sd_hf[f"transformer.h.{i}.mlp.c_proj.weight"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.weight"].transpose(
+ 0, 1
+ )
+ sd_hf[f"transformer.h.{i}.mlp.c_proj.bias"] = sd_megatron[f"{pf}{i}.mlp.dense_4h_to_h.bias"]
+
+ # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
+ sd_hf["lm_head.weight"] = word_embeddings
+
+ return sd_hf
+
+
+def copy_config(config_hf, config_megatron):
+ """Copy the config from Megatron to hf."""
+ config_hf.vocab_size = 64000
+ config_hf.n_positions = config_megatron["encoder_seq_length"]
+ config_hf.n_embd = config_megatron["hidden_size"]
+ config_hf.n_layer = config_megatron["num_layers"]
+ config_hf.n_head = config_megatron["num_attention_heads"]
+ config_hf.n_inner = config_megatron["ffn_hidden_size"]
+ config_hf.activation_function = "gelu"
+ config_hf.resid_pdrop = 0.1
+ config_hf.embd_pdrop = 0.1
+ config_hf.attn_pdrop = 0.1
+ config_hf.layer_norm_epsilon = config_megatron["layernorm_epsilon"] # 1e-5
+ config_hf.initializer_range = config_megatron["init_method_std"] # 0.02
+ config_hf.apply_query_key_layer_scaling = config_megatron["apply_query_key_layer_scaling"] # True
+ config_hf.normalize_attention_scores = True
+ config_hf.use_cache = True
+
+ # This identifies the 6.7B (7B) model which uses a different tokenizer
+ if config_megatron["hidden_size"] == 4096:
+ config_hf.bos_token_id = 1 # <|endoftext|>
+ config_hf.eos_token_id = 1 # <|endoftext|>
+ config_hf.pad_token_id = 0 # <unk>
+ else:
+ config_hf.bos_token_id = 2 # <s>
+ config_hf.eos_token_id = 3 # <|endoftext|>
+ config_hf.pad_token_id = 0 # <pad>
+
+ return config_hf
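+
+
+# For reference, `copy_config` above assumes the Megatron config dict provides at least the keys it reads:
+# "encoder_seq_length", "hidden_size", "num_layers", "num_attention_heads", "ffn_hidden_size",
+# "layernorm_epsilon", "init_method_std" and "apply_query_key_layer_scaling".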
+
+
+def main(args):
+ print(args)
+
+ checkpoint_path = args.checkpoint_path
+ save_path = args.save_path
+ if not isfile(checkpoint_path):
+ raise FileNotFoundError(f"ERROR! could not find file {checkpoint_path}")
+
+ # Load the model.
+ checkpoint = torch.load(checkpoint_path, map_location="cpu")
+
+ # Load the config.
+ config_megatron = checkpoint["hyper_parameters"]["cfg"]
+ config_hf = GPT2Config()
+ config_hf = copy_config(config_hf=config_hf, config_megatron=config_megatron)
+ config_hf.architectures = ["GPT2LMHeadModel"]
+
+ sd_megatron = checkpoint["state_dict"]
+
+ # Convert.
+ print("Converting")
+ sd_hf = convert_megatron_checkpoint(sd_megatron, config_hf)
+
+ # Print the structure of converted state dict.
+ if args.print_checkpoint_structure:
+ recursive_print(None, sd_hf)
+
+ config_hf.tokenizer_class = "GPTSw3Tokenizer"
+
+ # Store the config to file.
+ print("Saving config")
+ config_hf.save_pretrained(save_path)
+
+ # Store the state_dict to file.
+ output_checkpoint_file = os.path.join(save_path, "pytorch_model.bin")
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
+ torch.save(sd_hf, output_checkpoint_file)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--checkpoint_path",
+ type=str,
+ required=True,
+ help="e.g. megatron_gpt--val_loss=2.42-step=38000-consumed_samples=54720000",
+ )
+ parser.add_argument("--save_path", type=str, required=True, help="e.g. /home/user/gpt-sw3/hf")
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
+ _args = parser.parse_args()
+ main(_args)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/tokenization_gpt_sw3.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/tokenization_gpt_sw3.py
new file mode 100644
index 0000000000000000000000000000000000000000..d740c13d3594a2a18dd5b3e64ffcd3a25c8fce21
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gpt_sw3/tokenization_gpt_sw3.py
@@ -0,0 +1,342 @@
+"""The tokenizer used by the GPT-SW3 models."""
+
+import os
+import re
+import unicodedata
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import sentencepiece as spm
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import is_torch_available, logging
+
+
+if is_torch_available():
+ import torch
+
+
+logger = logging.get_logger(__name__)
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "AI-Sweden-Models/gpt-sw3-126m": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-126m/resolve/main/spiece.model",
+ "AI-Sweden-Models/gpt-sw3-356m": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-356m/resolve/main/spiece.model",
+ "AI-Sweden-Models/gpt-sw3-1.3b": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-1.3b/resolve/main/spiece.model",
+ "AI-Sweden-Models/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-6.7b/resolve/main/spiece.model",
+ "AI-Sweden-Models/gpt-sw3-6.7b-v2": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-6.7b-v2/resolve/main/spiece.model",
+ "AI-Sweden-Models/gpt-sw3-20b": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-20b/resolve/main/spiece.model",
+ "AI-Sweden-Models/gpt-sw3-40b": "https://huggingface.co/AI-Sweden-Models/gpt-sw3-20b/resolve/main/spiece.model",
+ }
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "AI-Sweden-Models/gpt-sw3-126m": 2048,
+ "AI-Sweden-Models/gpt-sw3-356m": 2048,
+ "AI-Sweden-Models/gpt-sw3-1.3b": 2048,
+ "AI-Sweden-Models/gpt-sw3-6.7b": 2048,
+ "AI-Sweden-Models/gpt-sw3-6.7b-v2": 2048,
+ "AI-Sweden-Models/gpt-sw3-20b": 2048,
+ "AI-Sweden-Models/gpt-sw3-40b": 2048,
+}
+
+
+class GPTSw3Tokenizer(PreTrainedTokenizer):
+ """
+ Construct a GPTSw3 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Example usage:
+ ```python
+ >>> from transformers import GPTSw3Tokenizer
+
+ >>> tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")
+ >>> tokenizer("Svenska är kul!")["input_ids"]
+ [1814, 377, 3617, 63504]
+ ```
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+ remove_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
+ keep_accents (`bool`, *optional*, defaults to `False`):
+ Whether or not to keep accents when tokenizing.
+ pad_token (`str`, *optional*):
+ The token used for padding, for example when batching sequences of different lengths. If not provided, will
+ default to '<pad>' or '<unk>' depending on model size.
+ unk_token (`str`, *optional*):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead. If not provided, will default to '<unk>'.
+ eos_token (`str`, *optional*):
+ The end of sequence token seen during pretraining. If not provided, will default to '<|endoftext|>'
+ bos_token (`str`, *optional*):
+ The beginning of sequence token that can be used for downstream task, was not seen during pretraining. If
+ not provided, will default to '<s>' or '<|endoftext|>', depending on model size.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
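+
+ For example (illustrative), passing `sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1}`
+ enables subword regularization when tokenizing.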
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ whitespaces (`set`):
+ The whitespaces that are replaced in the whitespace normalization in preprocessing.
+ non_printing_characters_re (`Pattern`):
+ The compiled regular expression to remove non-printing characters in preprocessing.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=False,
+ remove_space=False,
+ keep_accents=False,
+ pad_token=None,
+ unk_token=None,
+ eos_token=None,
+ bos_token=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ name_or_path = kwargs.get("name_or_path")
+ if name_or_path is None:
+ logger.warning(
+ "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
+ " you are testing the model, this can safely be ignored"
+ )
+ name_or_path = "None"
+
+ # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
+ eos_token = "<|endoftext|>" if eos_token is None else eos_token
+ unk_token = "" if unk_token is None else unk_token
+ if "gpt-sw3-7b" in name_or_path:
+ pad_token = unk_token if pad_token is None else pad_token
+ bos_token = eos_token if bos_token is None else bos_token
+ else:
+ pad_token = "" if pad_token is None else pad_token
+ bos_token = "" if bos_token is None else bos_token
+
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+ self.vocab_file = vocab_file
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ # Used for whitespace normalization in input texts
+ # fmt : off
+ self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
+ # fmt : on
+
+ # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
+ self.non_printing_characters_re = re.compile(
+ f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
+ )
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ remove_space=remove_space,
+ keep_accents=keep_accents,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__getstate__
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.__setstate__
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ @property
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
+ def vocab_size(self) -> int:
+ return len(self.sp_model)
+
+ def preprocess_text(self, text: str) -> str:
+ """
+ Returns the preprocessed text. This procedure is identical to what was used when training the tokenizer.
+ """
+
+ # Remove non-printing characters
+ text = self.non_printing_characters_re.sub("", text)
+
+ # Normalize whitespaces
+ text = "".join([char if char not in self.whitespaces else " " for char in text])
+
+ # NFC Unicode normalization
+ text = unicodedata.normalize("NFC", text)
+ return text
+
+ def _tokenize(self, text: str, **kwargs) -> List[str]:
+ text = self.preprocess_text(text)
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token: str) -> int:
+ """Converts a token (str) to an id (int) using the vocab."""
+ return self.sp_model.PieceToId(token)
+
+ def _convert_id_to_token(self, index: int) -> str:
+ """Converts an index (int) to a token (str) using the vocab."""
+ return self.sp_model.IdToPiece(index)
+
+ @staticmethod
+ def clean_up_tokenization(out_string: str) -> str:
+ """Returns the input string, this function is overridden to remove the default clean up."""
+ return out_string
+
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
+ """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for token in tokens:
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
+ if not prev_is_special:
+ out_string += " "
+
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+
+ return out_string
+
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.get_vocab
+ def get_vocab(self) -> Dict[str, int]:
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def encode_fast(
+ self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
+ ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
+ """
+ Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
+ functionality but is often much faster.
+
+ Does NOT handle special tokens correctly; these can be added manually as ids afterwards.
+
+ Does NOT support padding; pad ids can be added manually afterwards if needed.
+
+ Use default HuggingFace tokenization methods for full functionality.
+
+ Args:
+ text (`str` or `List[str]`): One or several text(s) to convert to token ids.
+ return_tensors (`str` or `bool`): Returns PyTorch tensors if set to True or "pt"
+
+ Returns:
+ `List[int]`, `List[List[int]]`, or `torch.Tensor`: The encoded text(s) as token ids.
+ """
+
+ if isinstance(text, str):
+ text = self.preprocess_text(text)
+ token_ids = self.sp_model.encode(text)
+ else:
+ text = [self.preprocess_text(t) for t in text]
+ token_ids = self.sp_model.encode(text)
+
+ if return_tensors is True or return_tensors == "pt":
+ token_ids = torch.tensor(token_ids)
+
+ return token_ids
+
+ def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
+ """
+ Decodes a token id or a sequence of token ids back to a string using the raw SP tokenizer. This has reduced
+ functionality but is often much faster.
+
+ Args:
+ token_ids (`int` or `List[int]`): Encoded token or text as token id(s).
+
+ Returns:
+ `str`: Decoded text
+ """
+
+ return self.sp_model.decode(token_ids)
+
+ @property
+ def default_chat_template(self):
+ """
+ This chat template formats messages like an instant messenger chat log, with "User:" and "Bot:" strings
+ preceding messages. BOS tokens are added between all messages.
+ """
+ logger.warning_once(
+ "\nNo chat template is defined for this tokenizer - using the default template "
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+ )
+ return (
+ "{{ eos_token }}{{ bos_token }}"
+ "{% for message in messages %}"
+ "{% if message['role'] == 'user' %}{{ 'User: ' + message['content']}}"
+ "{% else %}{{ 'Bot: ' + message['content']}}{% endif %}"
+ "{{ message['text'] }}{{ bos_token }}"
+ "{% endfor %}"
+ "Bot:"
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3635ace91163577201f716c9d67e255f11ea55b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py
@@ -0,0 +1,70 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_gptsan_japanese": ["GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTSanJapaneseConfig"],
+ "tokenization_gptsan_japanese": ["GPTSanJapaneseTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_gptsan_japanese"] = [
+ "GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "GPTSanJapaneseForConditionalGeneration",
+ "GPTSanJapaneseModel",
+ "GPTSanJapanesePreTrainedModel",
+ ]
+ _import_structure["tokenization_gptsan_japanese"] = [
+ "GPTSanJapaneseTokenizer",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_gptsan_japanese import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTSanJapaneseConfig
+ from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_gptsan_japanese import (
+ GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
+ GPTSanJapaneseForConditionalGeneration,
+ GPTSanJapaneseModel,
+ GPTSanJapanesePreTrainedModel,
+ )
+ from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b9c9d7d87cd4b0792d991e12c6a77099e036872
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py
new file mode 100644
index 0000000000000000000000000000000000000000..c25e4b0e1ea2a950353b89ee3016f0a80338ac64
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py
@@ -0,0 +1,159 @@
+# coding=utf-8
+# Copyright 2023, HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" GPTSAN-japanese model configuration"""
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
+ "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
+ ),
+}
+
+
+class GPTSanJapaneseConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate
+ a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese
+ [Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Arguments:
+ vocab_size (`int`, *optional*, defaults to 36000):
+ Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented
+ by the `input_ids` passed when calling [`GPTSanJapaneseModel`].
+ max_position_embeddings (`int`, *optional*, defaults to 1280):
+ The maximum sequence length that this model might ever be used with. Defaults set this to 1280.
+ d_model (`int`, *optional*, defaults to 1024):
+ Size of the encoder layers and the pooler layer.
+ d_ff (`int`, *optional*, defaults to 8192):
+ Size of the intermediate feed forward layer in each `SwitchTransformersBlock`.
+ d_ext (`int`, *optional*, defaults to 4096):
+ Size of the intermediate feed forward layer in each Extra-layers.
+ d_spout (`int`, *optional*, defaults to 128):
+ Size of the `spout` vector.
+ num_switch_layers (`int`, *optional*, defaults to 10):
+ Number of layers in the Switch Transformer layer.
+ num_ext_layers (`int`, *optional*, defaults to 0):
+ Number of layers in the Extra-layers.
+ num_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_experts (`int`, *optional*, defaults to 16):
+ Number of experts for each SwitchTransformer layer.
+ expert_capacity (`int`, *optional*, defaults to 128):
+ Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular
+ Transformer.
+ dropout_rate (`float`, *optional*, defaults to 0.0):
+ The ratio for all dropout layers.
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ router_bias (`bool`, *optional*, defaults to `False`):
+ Whether to add a bias to the router.
+ router_jitter_noise (`float`, *optional*, defaults to 0.0):
+ Amount of noise to add to the router. Set it to 0.0 during prediction or set a small value (usually 1e-2)
+ during training.
+ router_dtype (`str`, *optional*, defaults to `"float32"`):
+ The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
+ *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961).
+ router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
+ Whether to ignore padding tokens when routing.
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers.
+ initializer_factor (`float`, *optional*, defaults to 0.002):
+ A factor for initializing all weight matrices.
+ output_router_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the router logits of all experts.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models)
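+
+ Example (a minimal sketch; values follow the defaults of `__init__` below):
+
+ ```python
+ >>> from transformers import GPTSanJapaneseConfig
+
+ >>> # Initializing a configuration with the default values
+ >>> configuration = GPTSanJapaneseConfig()
+ >>> configuration.num_layers  # num_switch_layers + num_ext_layers
+ 10
+ ```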
+ """
+
+ model_type = "gptsan-japanese"
+ keys_to_ignore_at_inference = [
+ "past_key_values",
+ ]
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "num_heads",
+ "num_hidden_layers": "num_layers",
+ }
+
+ def __init__(
+ self,
+ vocab_size=36000,
+ max_position_embeddings=1280,
+ d_model=1024,
+ d_ff=8192,
+ d_ext=4096,
+ d_spout=128,
+ num_switch_layers=10,
+ num_ext_layers=0,
+ num_heads=16,
+ num_experts=16,
+ expert_capacity=128,
+ dropout_rate=0.0,
+ layer_norm_epsilon=1e-5,
+ router_bias=False,
+ router_jitter_noise=0.0,
+ router_dtype="float32",
+ router_ignore_padding_tokens=False,
+ output_hidden_states=False,
+ output_attentions=False,
+ initializer_factor=0.002,
+ output_router_logits=False,
+ use_cache=True,
+ separator_token_id=35998,
+ pad_token_id=35995,
+ eos_token_id=35999,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.d_ff = d_ff
+ self.d_ext = d_ext
+ self.d_spout = d_spout
+ self.num_switch_layers = num_switch_layers
+ self.num_ext_layers = num_ext_layers
+ self.num_layers = num_switch_layers + num_ext_layers
+ self.num_heads = num_heads
+ self.num_experts = num_experts
+ self.expert_capacity = expert_capacity
+ self.dropout_rate = dropout_rate
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.router_bias = router_bias
+ self.router_jitter_noise = router_jitter_noise
+ self.router_dtype = router_dtype
+ self.router_ignore_padding_tokens = router_ignore_padding_tokens
+ self.output_hidden_states = output_hidden_states
+ self.output_attentions = output_attentions
+ self.initializer_factor = initializer_factor
+ self.output_router_logits = output_router_logits
+ self.use_cache = use_cache
+
+ super().__init__(
+ separator_token_id=separator_token_id,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
+ **kwargs,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..a84d000d44390fe6ae821fb1cdfba968d40a2b93
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,181 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Convert GPTSANJapanese checkpoints from the original repository to pytorch model."""
+
+import argparse
+import json
+import os
+from collections import OrderedDict
+
+import numpy as np
+import tensorflow as tf
+import torch
+
+
+def convert_tf_gptsan_to_pt(args):
+ parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
+ params = json.loads(open(parameter_file).read())
+ if not params:
+ raise ValueError(
+ f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
+ )
+ if not args.output.endswith(".pt"):
+ args.output = args.output + ".pt"
+ new_state = OrderedDict()
+ with tf.device("/CPU:0"):
+ reader = tf.train.load_checkpoint(args.tf_model_dir)
+ shapes = reader.get_variable_to_shape_map()
+ for key_name in shapes.keys():
+ vnp = reader.get_tensor(key_name).astype(np.float16)
+ if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
+ continue
+ if key_name.startswith("pasts/"):
+ if key_name.startswith("pasts/mlp"):
+ player = int(key_name[9])
+ elif key_name.startswith("pasts/out"):
+ player = 8
+ name = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
+ new_state[name] = torch.tensor(state)
+ elif key_name.startswith("model/moe"):
+ player = int(key_name[9:].split("/")[0])
+ if key_name.endswith("/switch_gating/kernel"):
+ name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
+ new_state[name] = torch.tensor(state)
+ elif key_name.endswith("/softmlp/kernel"):
+ name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
+ new_state[name] = torch.tensor(state)
+ elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
+ nlayer = key_name[-9:-7]
+ for i in range(16):
+ name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
+ state = (
+ vnp[i].transpose([1, 0]).copy()
+ ) # In Mesh-Tensorflow, it is one array, so it is divided
+ new_state[name] = torch.tensor(state)
+ elif key_name.startswith("model/mlp"):
+ player = int(key_name[9:].split("/")[0])
+ if key_name.endswith("/p1/kernel"):
+ name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
+ new_state[name] = torch.tensor(state)
+ elif key_name.endswith("/p1/bias"):
+ name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
+ state = vnp.copy() # same because it is one dimensional
+ new_state[name] = torch.tensor(state)
+ elif key_name.endswith("/p2/kernel"):
+ name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
+ new_state[name] = torch.tensor(state)
+ elif key_name.endswith("/p2/bias"):
+ name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
+ state = vnp.copy() # same because it is one dimensional
+ new_state[name] = torch.tensor(state)
+ elif key_name.startswith("model/ln"):
+ player = int(key_name[8:].split("/")[0])
+ if key_name.endswith("/b"):
+ name = "model.blocks.%d.feed_forward.norm.bias" % player
+ state = vnp.copy() # same because it is one dimensional
+ new_state[name] = torch.tensor(state)
+ elif key_name.endswith("/g"):
+ name = "model.blocks.%d.feed_forward.norm.weight" % player
+ state = vnp.copy() # same because it is one dimensional
+ new_state[name] = torch.tensor(state)
+ elif key_name.startswith("model/att"):
+ player = int(key_name[9:].split("/")[0])
+ if key_name.endswith("/qkv/kernel"):
+ state = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
+ state_q = state[:, 0, :, :]
+ state_k = state[:, 1, :, :]
+ state_v = state[:, 2, :, :]
+ state_q = (
+ state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
+ .transpose([1, 0])
+ .copy()
+ ) # Mesh-Tensorflow is a diagonal matrix
+ state_k = (
+ state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
+ .transpose([1, 0])
+ .copy()
+ ) # Mesh-Tensorflow is a diagonal matrix
+ state_v = (
+ state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
+ .transpose([1, 0])
+ .copy()
+ ) # Mesh-Tensorflow is a diagonal matrix
+ name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
+ new_state[name] = torch.tensor(state_q)
+ name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
+ new_state[name] = torch.tensor(state_k)
+ name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
+ new_state[name] = torch.tensor(state_v)
+ elif key_name.endswith("/o/kernel"):
+ name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
+ state = (
+ vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
+ ) # Mesh-Tensorflow is a diagonal matrix
+ new_state[name] = torch.tensor(state)
+ elif key_name.startswith("model/an"):
+ player = int(key_name[8:].split("/")[0])
+ if key_name.endswith("/b"):
+ name = "model.blocks.%d.self_attn.norm.bias" % player
+ state = vnp.copy() # same because it is one dimensional
+ new_state[name] = torch.tensor(state)
+ elif key_name.endswith("/g"):
+ name = "model.blocks.%d.self_attn.norm.weight" % player
+ state = vnp.copy() # same because it is one dimensional
+ new_state[name] = torch.tensor(state)
+ elif (
+ key_name.startswith("model/wte")
+ or key_name.startswith("model/wpe")
+ or key_name.startswith("model/ete")
+ ):
+ nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
+ key_name[-3:]
+ ]
+ name = "model.%s.weight" % nlayer
+ state = vnp.copy() # same in embedded
+ new_state[name] = torch.tensor(state)
+ if key_name.startswith("model/wte"):
+ name = "lm_head.weight"
+ state = vnp.copy() # same in embedded
+ new_state[name] = torch.tensor(state)
+ elif key_name.startswith("model/wob"):
+ name = "final_logits_bias"
+ state = vnp.copy() # same in embedded
+ state = state.reshape((1, -1))
+ new_state[name] = torch.tensor(state)
+ elif key_name == "model/dense/kernel":
+ name = "model.last_project.weight"
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
+ new_state[name] = torch.tensor(state)
+ elif key_name == "model/dense_1/bias":
+ name = "model.last_project.bias"
+ state = vnp.copy() # same because it is one dimensional
+ new_state[name] = torch.tensor(state)
+ torch.save(new_state, args.output)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+ parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
+ parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
+ args = parser.parse_args()
+ convert_tf_gptsan_to_pt(args)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9b7003050b11ac76b80b6b624bc226b570f0591
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py
@@ -0,0 +1,1345 @@
+# coding=utf-8
+# Copyright 2023 Toshiyuki Sakamoto(tanreinama) and HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch GPTSANJapanese model."""
+
+
+import copy
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ DUMMY_INPUTS,
+ DUMMY_MASK,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_torch_fx_proxy,
+ logging,
+)
+from .configuration_gptsan_japanese import GPTSanJapaneseConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "GPTSanJapaneseConfig"
+_CHECKPOINT_FOR_DOC = "Tanrei/GPTSAN-japanese"
+
+####################################################
+# This dict contains ids and associated url
+# for the pretrained weights provided with the models
+####################################################
+GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "Tanrei/GPTSAN-japanese",
+ # See all GPTSAN-japanese models at https://huggingface.co/models?filter=gptsan-japanese
+]
+
+
+# Copied from transformers.models.switch_transformers.modeling_switch_transformers.router_z_loss_func
+def router_z_loss_func(router_logits: torch.Tensor) -> float:
+ r"""
+ Compute the router z-loss implemented in PyTorch.
+
+ The router z-loss was introduced in [Designing Effective Sparse Expert Models](https://arxiv.org/abs/2202.08906).
+ It encourages router logits to remain small in an effort to improve stability.
+
+ Args:
+ router_logits (`torch.Tensor`):
+ Input logits of shape [batch_size, sequence_length, num_experts]
+
+ Returns:
+ Scalar router z-loss.
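+
+ Example (an illustrative sketch; the shapes below are arbitrary toy values):
+
+ ```python
+ >>> import torch
+
+ >>> router_logits = torch.zeros(2, 4, 8)  # [num_groups, tokens_per_group, num_experts]
+ >>> z_loss = router_z_loss_func(router_logits)  # equals log(num_experts) ** 2 for all-zero logits
+ ```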
+ """
+ num_groups, tokens_per_group, _ = router_logits.shape
+ log_z = torch.logsumexp(router_logits, dim=-1)
+ z_loss = log_z**2
+ return torch.sum(z_loss) / (num_groups * tokens_per_group)
+
+
+# Copied from transformers.models.switch_transformers.modeling_switch_transformers.load_balancing_loss_func
+def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float:
+ r"""
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
+
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+ experts is too unbalanced.
+
+ Args:
+ router_probs (`torch.Tensor`):
+ Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
+ expert_indices (`torch.Tensor`):
+ Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.
+
+ Returns:
+ The auxiliary loss.
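+
+ Example (an illustrative sketch; the tensors below are hand-made toy values):
+
+ ```python
+ >>> import torch
+
+ >>> router_probs = torch.full((1, 4, 2), 0.5)  # uniform probabilities over 2 experts
+ >>> expert_indices = torch.tensor([[0, 1, 0, 1]])  # perfectly balanced routing
+ >>> aux_loss = load_balancing_loss_func(router_probs, expert_indices)  # 1.0 in this balanced case
+ ```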
+ """
+ num_experts = router_probs.shape[-1]
+
+ # cast the expert indices to int64, otherwise one-hot encoding will fail
+ if expert_indices.dtype != torch.int64:
+ expert_indices = expert_indices.to(torch.int64)
+
+ if len(expert_indices.shape) == 2:
+ expert_indices = expert_indices.unsqueeze(2)
+
+ expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)
+
+ # For a given token, determine if it was routed to a given expert.
+ expert_mask = torch.max(expert_mask, axis=-2).values
+
+ # cast to float32 otherwise mean will fail
+ expert_mask = expert_mask.to(torch.float32)
+ tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)
+
+ router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
+ return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2)
+
+
+class GPTSanJapaneseDenseActDense(nn.Module):
+ """
+ FFN Layer for Switch Transformer and Extra layers
+
+ GPTSAN can mix Switch Transformer layers and regular Transformer layers. This class is used as an expert in the
+ Switch Transformer layers and as the FFN in the regular (extra) Transformer layers. ReLU is used in the Switch
+ Transformer layers and Swish in the regular layers, so the `ext_layer` argument selects which one is used.
+
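+ Example (an illustrative sketch; `GPTSanJapaneseConfig()` builds the full-size default configuration, so
+ substitute a smaller configuration if you only want to smoke-test the layer):
+
+ ```python
+ >>> import torch
+
+ >>> config = GPTSanJapaneseConfig()
+ >>> ffn = GPTSanJapaneseDenseActDense(config, ext_layer=True)  # Swish FFN used in the extra layers
+ >>> hidden_states = torch.randn(1, 4, config.d_model)
+ >>> output = ffn(hidden_states)  # same shape as `hidden_states`
+ ```
+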
+ """
+
+ def __init__(self, config: GPTSanJapaneseConfig, ext_layer=False):
+ super().__init__()
+ d_inter = config.d_ext if ext_layer else config.d_ff
+ self.wi = nn.Linear(config.d_model, d_inter, bias=ext_layer)
+ self.wo = nn.Linear(d_inter, config.d_model, bias=ext_layer)
+ self.dropout = nn.Identity() if ext_layer else nn.Dropout(config.dropout_rate)
+ self.act = ACT2FN["swish" if ext_layer else "relu"]
+
+ def forward(self, hidden_states):
+ r"""
+ Args:
+ hidden_states (`torch.Tensor`) :
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
+ Returns:
+ torch.Tensor[num_groups, tokens_per_group, hidden_dim]
+
+ """
+ hidden_states = self.wi(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.wo(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersTop1Router with SwitchTransformers->GPTSanJapanese
+class GPTSanJapaneseTop1Router(nn.Module):
+ """
+ Router using tokens choose top-1 experts assignment.
+
+ This router uses the same mechanism as in Switch Transformer (https://arxiv.org/abs/2101.03961) and V-MoE
+ (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then
+ routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each
+ token is processed by an expert**, or that each expert receives at least one token.
+
+ """
+
+ def __init__(self, config: GPTSanJapaneseConfig):
+ super().__init__()
+ self.num_experts = config.num_experts
+ self.expert_capacity = config.expert_capacity
+ self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
+ self.jitter_noise = config.router_jitter_noise
+ self.ignore_padding_tokens = config.router_ignore_padding_tokens
+ self.dtype = getattr(torch, config.router_dtype)
+
+ def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ r"""
+ Computes router probabilities from input hidden states.
+
+ Args:
+ hidden_states (`torch.Tensor`):
+ (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
+ Returns:
+ router_probabilities (`torch.Tensor`):
+ Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
+ token and expert. Used for routing tokens to experts.
+ router_logits (`torch.Tensor`):
+ Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
+ This is used later for computing router z-loss.
+ """
+ # float32 is used to ensure stability. See the discussion of "selective precision" in
+ # https://arxiv.org/abs/2101.03961.
+ # We also store the previous dtype to cast back the output to the previous dtype
+ self.input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(self.dtype)
+
+ if self.training and self.jitter_noise > 0:
+ # Multiply the token inputs by the uniform distribution - adding some noise
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
+
+ # Shape: [num_groups, tokens_per_group, num_experts]
+ self._cast_classifier()
+ router_logits = self.classifier(hidden_states)
+
+ # Apply Softmax and cast back to the original `dtype`
+ router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)
+ return router_probabilities, router_logits
+
+ def _cast_classifier(self):
+ r"""
+ `bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore we need to check whether the
+ classifier is an instance of the `Linear8bitLt` class by checking its special attributes.
+ """
+ if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")):
+ self.classifier = self.classifier.to(self.dtype)
+
+ def forward(self, hidden_states: torch.Tensor) -> Tuple:
+ r"""
+ Generic forward function for every Router class. Each Router expects the same input hidden states
+ (`hidden_states`), one per token, and an `expert_capacity` corresponding to the number of tokens the Router
+ will send to each expert; some routers may send only a few tokens to each expert.
+
+ Each Router works as follows: given the hidden states for each token, it obtains the `router_probs` and
+ `router_logits` from the router weights, which assign each token a raw probability of being routed to each
+ expert. Each Router class then has to define its own `_compute_routing_instructions`.
+
+ Args:
+ hidden_states (`torch.Tensor`) :
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
+ Returns:
+ Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs
+ and the router logits. The router probabilities and logits are required to compute the loss.
+ """
+ router_probs, router_logits = self._compute_router_probabilities(hidden_states)
+
+ expert_index = torch.argmax(router_probs, dim=-1)
+ expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)
+
+ # Mask tokens outside expert capacity. Sum over each sequence
+ token_priority = torch.cumsum(expert_index, dim=-2)
+ # mask out tokens that would overflow the expert's capacity
+ expert_capacity_mask = token_priority <= self.expert_capacity
+ expert_index = expert_index * expert_capacity_mask
+
+ router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)
+ return expert_index, router_probs, router_logits
+
+
+# Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersSparseMLP with SwitchTransformers->GPTSanJapanese
+class GPTSanJapaneseSparseMLP(nn.Module):
+ r"""
+ Implementation of the Switch Transformers Sparse MLP module.
+ """
+
+ def __init__(self, config: GPTSanJapaneseConfig, expert_class: nn.Module = GPTSanJapaneseDenseActDense):
+ super().__init__()
+ # Step 1: Get the correct router according to its class
+ self.router = GPTSanJapaneseTop1Router(config)
+
+ # Step 2: Get the experts
+ self.experts = nn.ModuleDict()
+ for idx in range(config.num_experts):
+ self.experts[f"expert_{idx}"] = expert_class(config)
+
+ def forward(self, hidden_states):
+ r"""
+ This is slightly tricky to follow. In the correct order, a MoE layer does the following:
+
+ 1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)`
+ and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the
+ hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor).
+
+ 2- Dispatch the tokens to their associated experts. We do a classic for loop over the experts and assign each
+ expert its corresponding hidden states (see the example below).
+
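+ Example (an illustrative sketch only; it builds the full-size default configuration, which allocates a large
+ layer, and assumes the default `GPTSanJapaneseConfig` provides every attribute read by the router and experts):
+
+ ```python
+ >>> import torch
+
+ >>> config = GPTSanJapaneseConfig()  # full-size defaults
+ >>> sparse_mlp = GPTSanJapaneseSparseMLP(config)
+ >>> hidden_states = torch.randn(1, 4, config.d_model)
+ >>> hidden_states, (router_logits, expert_index) = sparse_mlp(hidden_states)
+ ```
+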
+ """
+ # Step 1: Get the router_mask from the router as well as the probabilities
+ router_mask, router_probs, router_logits = self.router(hidden_states)
+ expert_index = torch.argmax(router_mask, dim=-1)
+
+ # The router might not assign every token to an expert, which means that some hidden states
+ # can be unchanged from one layer to another. That is why the hidden states are cloned before updating only the selected ones.
+
+ next_states = hidden_states.clone()
+ for idx, expert in enumerate(self.experts.values()):
+ token_indices = router_mask[:, :, idx].bool()
+ next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype)
+
+ hidden_states = router_probs * next_states
+ return hidden_states, (router_logits, expert_index)
+
+
+class GPTSanJapaneseLayerSparseFF(nn.Module):
+ r"""
+ Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.
+
+ Parameters:
+ config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+ def __init__(self, config: GPTSanJapaneseConfig):
+ super().__init__()
+ self.mlp = GPTSanJapaneseSparseMLP(config)
+ self.soft_bypass_mlp = nn.Linear(config.d_model, config.d_model, bias=False)
+ self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+
+ def forward(self, hidden_states, output_router_logits):
+ r"""
+ Args:
+ hidden_states (`torch.Tensor`) :
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
+ output_router_logits (`bool`) :
+ whether to also return the router tuple (router logits and expert index) from the sparse MLP.
+ Returns:
+ torch.Tensor[num_groups, tokens_per_group, hidden_dim]
+
+ """
+ forwarded_states, router_tuple = self.mlp(hidden_states)
+ forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states))
+ output = hidden_states + self.norm(forwarded_states)
+
+ if output_router_logits and router_tuple is not None:
+ return output, router_tuple
+ else:
+ return output
+
+
+class GPTSanJapaneseLayerDenseFF(nn.Module):
+ r"""
+ Extra Transformers Feed Forward layer module.
+
+ Parameters:
+ config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+ def __init__(self, config: GPTSanJapaneseConfig):
+ super().__init__()
+ # Check if it is a sparse layer, if not then it is a dense layer
+ self.mlp = GPTSanJapaneseDenseActDense(config, ext_layer=True)
+ self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+
+ def forward(self, hidden_states):
+ r"""
+ Args:
+ hidden_states (`torch.Tensor`) :
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
+ Returns:
+ torch.Tensor[num_groups, tokens_per_group, hidden_dim]
+
+ """
+ forwarded_states = self.mlp(hidden_states)
+ output = hidden_states + self.norm(forwarded_states)
+ return output
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->GPTSanJapanese
+class GPTSanJapaneseAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[GPTSanJapaneseConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+class GPTSanJapaneseLayerSelfAttention(nn.Module):
+ """
+ Self Attention and Normalization Unit
+ """
+
+ def __init__(self, config, has_relative_attention_bias=False):
+ super().__init__()
+ self.self_attn = GPTSanJapaneseAttention(
+ embed_dim=config.d_model,
+ num_heads=config.num_heads,
+ is_decoder=True,
+ bias=has_relative_attention_bias,
+ )
+ self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
+
+ def forward(
+ self,
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
+ r"""
+ Self-attention and normalize block.
+
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ if the model is configured as a decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
+ decoding. If `past_key_values` are used, the user can optionally input only the last
+ `decoder_input_ids` (those that don't have their past key value states given to this model) of shape
+ `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ Returns:
+ Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
+ """
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ atten_out = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=(1 - attention_mask) * torch.finfo(hidden_states.dtype).min,
+ layer_head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ if output_attentions:
+ attn_weights = (atten_out[1],)
+ else:
+ attn_weights = ()
+
+ attention_output = atten_out[0]
+
+ hidden = hidden_states + self.norm(attention_output)
+
+ if use_cache:
+ outputs = (hidden, atten_out[2]) # hidden, present, (attentions)
+ else:
+ outputs = (hidden,) # hidden, (attentions)
+
+ return outputs + attn_weights
+
+
+class GPTSanJapaneseBlock(nn.Module):
+ """
+ Self Attention and FFN Unit
+ """
+
+ def __init__(self, config, ext_layer=False):
+ super().__init__()
+ self.self_attn = GPTSanJapaneseLayerSelfAttention(config)
+ self.feed_forward = GPTSanJapaneseLayerDenseFF(config) if ext_layer else GPTSanJapaneseLayerSparseFF(config)
+
+ def forward(
+ self,
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ output_router_tuple: Optional[bool] = False,
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
+ r"""
+ GPTSAN transformer block.
+
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ if the model is configured as a decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
+ decoding. If `past_key_values` are used, the user can optionally input only the last
+ `decoder_input_ids` (those that don't have their past key value states given to this model) of shape
+ `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ output_attentions (`bool`) :
+ whether to return the attention probabilities.
+ output_router_tuple (`bool`) :
+ whether to also return the router logits and the selected expert indices from the sparse FF layer.
+ Returns:
+ Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
+ """
+ atten_out = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=past_key_value,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ attention_output = atten_out[0]
+
+ if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF):
+ sparse_out = self.feed_forward(attention_output, output_router_tuple)
+ if output_router_tuple:
+ hidden, router_tuple = sparse_out
+ else:
+ hidden = sparse_out
+ else:
+ hidden = self.feed_forward(attention_output)
+
+ outputs = (hidden,) + atten_out[1:]
+
+ if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF) and output_router_tuple:
+ outputs += (router_tuple,)
+
+ return outputs
+
+
+class GPTSanJapanesePreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GPTSanJapaneseConfig
+ base_model_prefix = "gptsan_japanese"
+ supports_gradient_checkpointing = False
+ _no_split_modules = ["GPTSanJapaneseBlock"]
+ _skip_keys_device_placement = "past_key_values"
+
+ @property
+ def dummy_inputs(self):
+ input_ids = torch.tensor(DUMMY_INPUTS)
+ input_mask = torch.tensor(DUMMY_MASK)
+ dummy_inputs = {
+ "input_ids": input_ids,
+ "attention_mask": input_mask,
+ }
+ return dummy_inputs
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_factor # Used for testing weights initialization
+ if isinstance(module, nn.LayerNorm):
+ module.weight.data.fill_(factor * 1.0)
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module, "bias") and module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ elif isinstance(module, GPTSanJapaneseModel):
+ # Mesh TensorFlow embeddings initialization
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
+ module.embed_tokens.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ if hasattr(module, "extra_position_embeddings") and module.extra_position_embeddings is not None:
+ module.extra_position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ elif isinstance(module, (GPTSanJapaneseModel, GPTSanJapaneseForConditionalGeneration)):
+ # Mesh TensorFlow embeddings initialization
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
+ module.final_logits_bias.data.normal_(mean=0.0, std=factor * 1.0)
+ if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
+ elif isinstance(module, GPTSanJapaneseDenseActDense):
+ # Mesh TensorFlow FF initialization
+ # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
+ # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
+ module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
+ if hasattr(module.wi, "bias") and module.wi.bias is not None:
+ module.wi.bias.data.zero_()
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
+ module.wo.bias.data.zero_()
+ elif isinstance(module, GPTSanJapaneseAttention):
+ # Multi-headed attention
+ d_model = self.config.d_model
+ key_value_proj_dim = self.config.d_model
+ n_heads = self.config.num_heads
+ module.k_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
+ module.v_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
+ module.q_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
+ module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
+ elif isinstance(module, GPTSanJapaneseSparseMLP):
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
+ d_model = self.config.d_model
+ key_value_proj_dim = self.config.d_model
+ n_heads = self.config.num_heads
+ module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1)
+ for idx in range(self.config.num_experts):
+ module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+ module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
+
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right
+ def _shift_right(self, input_ids):
+ decoder_start_token_id = self.config.decoder_start_token_id
+ pad_token_id = self.config.pad_token_id
+
+ if decoder_start_token_id is None:
+ raise ValueError(
+ "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. "
+ "See T5 docs for more information."
+ )
+
+ # shift inputs to the right
+ if is_torch_fx_proxy(input_ids):
+ # Item assignment is not supported natively for proxies.
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
+ else:
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
+ shifted_input_ids[..., 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
+
+GPTSAN_JAPANESE_START_DOCSTRING = r"""
+
+ The [GPTSAN-japanese](https://github.com/tanreinama/GPTSAN) model was proposed as a general-purpose Switch
+ transformer based Japanese language model.
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GPTSAN_JAPANESE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. GPTSAN-japanese is a model that generates sentence
+ continuations or predicts tokens at mask positions. Special tokens required for inputs to the model are
+ automatically appended.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ A mask marking the prefix part of the Prefix-LM input. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **prefix** input,
+ - 0 for tokens that are **not-prefix** input.
+ spout (`torch.Tensor` of shape `(batch_size, config.d_spout)`):
+ This vector is transformed through an 8-layer FFN and can be used instead of `past_key_values`.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
+ Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models.
+"""
+
+
+@add_start_docstrings(
+ "The bare GPTSAN-japanese Model transformer outputting raw hidden-states without any specific head on top.",
+ GPTSAN_JAPANESE_START_DOCSTRING,
+)
+class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel):
+ def __init__(self, config: GPTSanJapaneseConfig):
+ super().__init__(config)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
+ self.config = copy.deepcopy(config)
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
+ self.last_project = nn.Linear(config.d_model, config.d_model, bias=True)
+ self.act = ACT2FN["swish"]
+
+ self.blocks = torch.nn.ModuleList([])
+ for _ in range(config.num_switch_layers):
+ self.blocks.append(GPTSanJapaneseBlock(config))
+ for _ in range(config.num_ext_layers):
+ self.blocks.append(GPTSanJapaneseBlock(config, ext_layer=True))
+
+ if config.num_ext_layers > 0:
+ self.extra_position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
+
+ if config.d_spout:
+ spouts = []
+ for _ in range(8):
+ spouts.append(nn.Linear(config.d_spout, config.d_spout, bias=False))
+ spouts.append(nn.Tanh())
+ spouts.append(nn.Linear(config.d_spout, config.num_layers * 2 * config.d_model, bias=False))
+ self.spout = nn.Sequential(*spouts)
+
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, new_embeddings):
+ self.embed_tokens = new_embeddings
+
+ @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.FloatTensor] = None,
+ spout: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ num_precontext: Optional[torch.LongTensor] = None,
+ ) -> Union[MoEModelOutputWithPastAndCrossAttentions, Tuple[torch.FloatTensor]]:
+ r"""
+ num_precontext (`torch.LongTensor` of shape `(batch_size,1)`):
+ length of the `hybrid` (prefix) tokens in the input. Tokens up to this length attend to both the preceding
+ and following context, like BERT; tokens after it attend only to the preceding context, like GPT. See also:
+ https://github.com/tanreinama/GPTSAN/blob/main/report/model.md
+
+ Returns:
+ A `MoEModelOutputWithPastAndCrossAttentions` when `return_dict=True` (unset, it falls back to
+ `config.use_return_dict`), otherwise a plain `tuple`.
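+
+ Example (a minimal sketch; it downloads the public `Tanrei/GPTSAN-japanese` checkpoint, mirroring the examples
+ used elsewhere in this file):
+
+ ```python
+ >>> from transformers import AutoTokenizer, GPTSanJapaneseModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> model = GPTSanJapaneseModel.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> inputs = tokenizer("織田信長は、", return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, d_model)
+ ```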
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ device = self.position_embeddings.weight.device
+ if input_ids is None:
+ input_ids = torch.zeros([1, 1]).int().to(device) # dummy for input_ids was None
+ num_pasts_contexts = 0
+ num_batch = input_ids.shape[0]
+ pasts_or_spout_value = None
+ if past_key_values is not None:
+ num_pasts_contexts = past_key_values[0][0].shape[2]
+ elif self.config.d_spout and spout is not None:
+ # `spout` is a special input vector specific to GPTSAN
+ # It conditions the output by injecting projected information, such as the sentence class used during training.
+ # It should be passed instead of the first past_key_value.
+ # See the original GPTSAN repository for details
+ num_pasts_contexts += 1
+
+ # If there is an attention_mask, increase first one for spout
+ if self.config.d_spout and spout is not None and attention_mask is not None:
+ attention_mask_with_spout = torch.ones(num_batch, attention_mask.shape[1] + 1, device=device)
+ attention_mask_with_spout[:, 1:] -= 1 - attention_mask # 1st token should be spout
+ attention_mask = attention_mask_with_spout # update attention_mask
+
+ if num_precontext is not None:
+ # `num_precontext` is the number of tokens that refer to each other in prefix-lm
+ # created per batch, so dimension of num_precontext should be [batch, 1]
+ if not (
+ len(num_precontext.shape) == 2 and num_precontext.shape[1] == 1
+ ): # num_precontext Should be [batch,1]
+ raise ValueError("num_precontext should be [batch, 1] size.")
+ num_precontext = torch.reshape(num_precontext, [-1])
+ else:
+ num_precontext = torch.zeros([num_batch]).int().to(device)
+
+ num_input_contexts = input_ids.shape[1]
+ num_output_contexts = num_input_contexts + num_pasts_contexts
+
+ hidden_states = self.embed_tokens(input_ids)
+
+ if past_key_values is not None:
+ pasts_or_spout_value = past_key_values
+ elif self.config.d_spout and spout is not None:
+ # Make vector from `spout` of GPTSAN to the same shape as past_key_values
+ pasts_or_spout_value = self.spout(spout) # projecting `spout` vector
+ pasts_or_spout_value = torch.reshape(
+ pasts_or_spout_value,
+ [
+ num_batch,
+ self.config.num_layers,
+ 2,
+ self.config.num_heads,
+ num_pasts_contexts,
+ self.config.d_model // self.config.num_heads,
+ ],
+ )
+ pasts_or_spout_value = torch.split(pasts_or_spout_value, [1] * self.config.num_layers, dim=1)
+ # make same shape as past_key_values
+ pasts_or_spout_value = tuple(
+ tuple([b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1)]) for a in pasts_or_spout_value
+ )
+ else:
+ pasts_or_spout_value = [None] * self.config.num_layers
+
+ # Token position considering spout and pasts
+ token_position = torch.arange(num_input_contexts).to(device) + num_pasts_contexts
+
+ if attention_mask is None:
+ attention_mask = torch.ones(num_batch, num_input_contexts, device=device)
+
+ # positions for get position_embeddings
+ gather_position = (
+ (
+ torch.zeros((num_batch, self.config.d_model, num_input_contexts)).to(device)
+ + token_position.unsqueeze(0)
+ )
+ .transpose(1, 2)
+ .long()
+ )
+ # When padding with padding_side="left", zeros line up on the left side of attention_mask, so position_embeddings is shifted accordingly
+ gather_position -= (1 - attention_mask).argmin(dim=-1).unsqueeze(1).unsqueeze(2)
+ gather_position = torch.clip(gather_position, num_pasts_contexts, self.config.max_position_embeddings - 1)
+
+ # attention_mask is applied per batch
+ for i in range(num_batch):
+ hidden_states[i] += torch.gather(self.position_embeddings.weight, dim=0, index=gather_position[i])
+
+ # Create a mask to be used when making the prefix Input length of Prefix-LM variable
+ causal_mask = (
+ torch.tril(torch.ones((num_output_contexts, num_output_contexts), dtype=torch.uint8))
+ .view(1, 1, num_output_contexts, num_output_contexts)
+ .to(device)
+ )
+ prefix_lm_mask = causal_mask[:, :, -num_input_contexts:, :]
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids.unsqueeze(1).unsqueeze(2)
+ prefix_lm_mask = ((prefix_lm_mask + token_type_ids) > 0).float()
+ # Merge prefix_lm_mask and attention_mask
+ extended_attention_mask = prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2)
+
+ # Prepare head mask if needed
+ if head_mask is not None:
+ head_mask = self.get_head_mask(
+ head_mask, self.config.num_switch_layers + self.config.num_ext_layers
+ ) # n_layer x batch x n_heads x N x N
+
+ # outputs
+ present_key_value_states = () if self.config.use_cache or use_cache else None
+ all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None
+ all_attentions = () if self.config.output_attentions or output_attentions else None
+ all_router_probs = () if self.config.output_router_logits or output_router_logits else None
+
+ for layer, past in enumerate(pasts_or_spout_value):
+ if layer == self.config.num_switch_layers:
+ if self.config.num_ext_layers > 0:
+ # `extra_position_embeddings` are additional position embeddings that only exist when the model has been extended with code from the original GPTSAN repository; they are not used in the default model.
+ # They are created when an additional layer is added and only that part of the model is trained.
+ # convert_gptsan_tf_checkpoint_to_pytorch.py therefore handles them when converting and loading models created in the original GPTSAN repository.
+ for i in range(num_batch):
+ hidden_states[i] += torch.gather(
+ self.extra_position_embeddings.weight, dim=0, index=gather_position[i]
+ )
+
+ output_router_tuple = (
+ self.config.output_router_logits or output_router_logits
+ ) and layer < self.config.num_switch_layers
+ block_output = self.blocks[layer](
+ hidden_states=hidden_states,
+ past_key_value=past,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ use_cache=self.config.use_cache or use_cache,
+ output_attentions=self.config.output_attentions or output_attentions,
+ output_router_tuple=output_router_tuple,
+ )
+
+ outpos = 0
+ hidden_states = block_output[outpos]
+ if self.config.output_hidden_states or output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.config.use_cache or use_cache:
+ outpos += 1
+ present = block_output[outpos]
+ present_key_value_states += (present,)
+ if self.config.output_attentions or output_attentions:
+ outpos += 1
+ attention_probs = block_output[outpos]
+ all_attentions += (attention_probs,)
+ if output_router_tuple:
+ outpos += 1
+ router_tuple = block_output[outpos]
+ all_router_probs += (router_tuple[0],) # all_router_probs is a tuple (see its initialization above), so extend it rather than call .append()
+
+ hidden_states = self.last_project(hidden_states)
+ hidden_states = self.act(hidden_states)
+
+ if self.config.output_hidden_states or output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ present_key_value_states,
+ all_hidden_states,
+ all_attentions,
+ all_router_probs,
+ ]
+ if v is not None
+ )
+
+ return MoEModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=present_key_value_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ router_probs=all_router_probs,
+ )
+
+
+@add_start_docstrings(
+ "The bare GPTSAN-japanese Model with a language modeling head.",
+ GPTSAN_JAPANESE_START_DOCSTRING,
+)
+class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config: GPTSanJapaneseConfig):
+ super().__init__(config)
+ self.model = GPTSanJapaneseModel(config)
+ self.register_buffer("final_logits_bias", torch.zeros([1, config.vocab_size]))
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
+ if not self.config.torchscript:
+ self.lm_head.weight = self.model.embed_tokens.weight
+
+ @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.FloatTensor] = None,
+ spout: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple[torch.FloatTensor], MoECausalLMOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
+ labels in `[0, ..., config.vocab_size - 1]`
+
+ Returns:
+ A `MoECausalLMOutputWithPast` when `return_dict=True` (unset, it falls back to `config.use_return_dict`), otherwise a plain `tuple`.
+
+ Example:
+
+ Text Generation with regular LM Model
+ ```python
+ >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
+
+ >>> device = "cuda"
+ >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> x_token = tokenizer("織田信長は、", return_tensors="pt")
+ >>> trainer_utils.set_seed(30)
+ >>> input_ids = x_token.input_ids.to(device)
+ >>> gen_token = model.generate(input_ids, max_new_tokens=50)
+ >>> tokenizer.decode(gen_token[0])
+ "織田信長は、政治・軍事の中枢まで掌握した政治家であり、日本史上類を見ない驚異的な軍事侵攻を続け..."
+ ```
+
+ Text Generation with Prefix-LM Model
+ ```python
+ >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
+
+ >>> device = "cuda"
+ >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> x_token = tokenizer("", prefix_text="織田信長は、", return_tensors="pt")
+ >>> trainer_utils.set_seed(30)
+ >>> input_ids = x_token.input_ids.to(device)
+ >>> token_type_ids = x_token.token_type_ids.to(device)
+ >>> gen_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
+ >>> tokenizer.decode(gen_token[0])
+ "織田信長は、政治・外交で数々の戦果を上げるが、1568年からは、いわゆる本能寺の変で細川晴元に暗殺される..."
+ ```
+
+ Simultaneous Text Generation and Masked Language Modeling
+ ```python
+ >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
+
+ >>> device = "cuda"
+ >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> masked_sentence = "武田信玄は、<|inputmask|>時代ファンならぜひ押さえ<|inputmask|>きたい名将の一人。"
+ >>> x_token = tokenizer("", prefix_text=masked_sentence, return_tensors="pt")
+ >>> trainer_utils.set_seed(30)
+ >>> input_ids = x_token.input_ids.to(device)
+ >>> token_type_ids = x_token.token_type_ids.to(device)
+ >>> out_lm_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
+ >>> out_mlm_token = model(input_ids, token_type_ids=token_type_ids).logits.argmax(axis=-1)
+ >>> tokenizer.decode(out_mlm_token[0])
+ "武田信玄は、戦国時代ファンならぜひ押さえておきたい名将の一人。"
+
+ >>> tokenizer.decode(out_lm_token[0][input_ids.shape[1] :])
+ "武田氏の三代に渡った武田家のひとり\n甲斐市に住む、日本史上最大の戦国大名。..."
+ ```"""
+ SEG_TOKEN = self.config.separator_token_id
+ use_cache = use_cache or self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ model_return_dict = True
+ num_precontext = None
+ if input_ids is not None:
+ num_batch = input_ids.shape[0]
+ num_precontext = torch.zeros([num_batch]).int().to(input_ids.device)
+ where_separators = torch.where(input_ids == SEG_TOKEN)
+ num_precontext[where_separators[0]] += where_separators[1]
+ num_precontext = num_precontext.unsqueeze(1)
+
+ outputs = self.model(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ spout,
+ past_key_values,
+ head_mask,
+ use_cache,
+ inputs_embeds,
+ decoder_inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ model_return_dict,
+ output_router_logits,
+ num_precontext,
+ )
+
+ lm_logits = self.lm_head(outputs[0])
+ if lm_logits.shape[-1] == self.final_logits_bias.shape[-1]:
+ lm_logits = lm_logits + self.final_logits_bias
+
+ loss = None
+ z_loss = None
+ router_probs = None
+ aux_loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(lm_logits.device)
+
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
+
+ if output_router_logits:
+ # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder
+ router_logits, expert_indexes = self._unpack_router_logits(outputs.router_probs)
+ z_loss = router_z_loss_func(router_logits)
+ router_probs = nn.Softmax(dim=-1)(router_logits)
+ aux_loss = load_balancing_loss_func(router_probs, expert_indexes)
+
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ loss,
+ lm_logits,
+ outputs.past_key_values,
+ outputs.hidden_states,
+ outputs.router_probs,
+ z_loss,
+ aux_loss,
+ ]
+ if v is not None
+ )
+
+ return MoECausalLMOutputWithPast(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ router_logits=outputs.router_probs,
+ z_loss=z_loss,
+ aux_loss=aux_loss,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: torch.FloatTensor,
+ token_type_ids: Optional[torch.FloatTensor] = None,
+ spout: Optional[Union[List, torch.FloatTensor]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ **kwargs,
+ ):
+ if isinstance(spout, list):
+ spout = torch.tensor(spout).float()
+ if input_ids is not None:
+ spout = spout.to(input_ids.device)
+ if past_key_values is not None:
+ return {
+ "input_ids": input_ids[:, -1:] if input_ids is not None else None,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids[:, -1:] if token_type_ids is not None else None,
+ "spout": spout,
+ "past_key_values": past_key_values,
+ }
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids,
+ "spout": spout,
+ "past_key_values": None,
+ }
+
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.prepare_decoder_input_ids_from_labels with SwitchTransformers->GPTSanJapanese
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return self._shift_right(labels)
+
+ # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration.resize_token_embeddings with MBart->GPTSanJapanese
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
+ return new_embeddings
+
+ # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration._resize_final_logits_bias with MBart->GPTSanJapanese
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
+ old_num_tokens = self.final_logits_bias.shape[-1]
+ if new_num_tokens <= old_num_tokens:
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
+ else:
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
+ self.register_buffer("final_logits_bias", new_bias)
+
+ def get_input_embeddings(self):
+ return self.model.get_input_embeddings()
+
+ def set_input_embeddings(self, new_embeddings):
+ self.model.set_input_embeddings(new_embeddings)
+
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.set_output_embeddings with SwitchTransformers->GPTSanJapanese
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.get_output_embeddings with SwitchTransformers->GPTSanJapanese
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration._unpack_router_logits with SwitchTransformers->GPTSanJapanese
+ def _unpack_router_logits(self, router_outputs):
+ total_router_logits = []
+ total_expert_indexes = []
+ for router_output in router_outputs:
+ if len(router_output[0].shape) > 1:
+ router_logits, expert_indexes = router_output
+ total_router_logits.append(router_logits)
+ total_expert_indexes.append(expert_indexes)
+ return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1)
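+
+
+# Hedged usage sketch (not part of the original module): the forward pass above returns `z_loss`
+# and `aux_loss` separately instead of folding them into `loss`. The helper below shows one way a
+# training loop might combine them; the coefficient defaults are illustrative assumptions, not
+# values defined anywhere in this file.
+def _example_combine_moe_losses(lm_loss, z_loss=None, aux_loss=None, z_coef=1e-3, aux_coef=1e-2):
+    """Combine the language-modeling loss with the optional router losses."""
+    total_loss = lm_loss
+    if z_loss is not None:
+        total_loss = total_loss + z_coef * z_loss
+    if aux_loss is not None:
+        total_loss = total_loss + aux_coef * aux_loss
+    return total_loss
+
+
+# Usage, assuming `outputs` comes from a forward call with `labels` and `output_router_logits=True`:
+#   total_loss = _example_combine_moe_losses(outputs.loss, outputs.z_loss, outputs.aux_loss)
+#   total_loss.backward()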
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py
new file mode 100644
index 0000000000000000000000000000000000000000..df3f94dc1e8965375022f267b80ab207c4683e0e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py
@@ -0,0 +1,541 @@
+# coding=utf-8
+# Copyright 2023 HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for GPTSANJapanese."""
+import collections
+import json
+import os
+import re
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...tokenization_utils_base import (
+ BatchEncoding,
+ PreTokenizedInput,
+ PreTokenizedInputPair,
+ TextInput,
+ TextInputPair,
+ TruncationStrategy,
+)
+from ...utils import PaddingStrategy, logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "Tanrei/GPTSAN-japanese": "https://huggingface.co/Tanrei/GPTSAN-japanese/blob/main/vocab.txt",
+ },
+ "emoji_file": {
+ "Tanrei/GPTSAN-japanese": "https://huggingface.co/Tanrei/GPTSAN-japanese/blob/main/emoji.json",
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "Tanrei/GPTSAN-japanese": 1280,
+}
+
+
+def load_vocab_and_emoji(vocab_file, emoji_file):
+ """Loads a vocabulary file and emoji file into a dictionary."""
+ with open(emoji_file, "r", encoding="utf-8") as f:
+ emoji = json.loads(f.read())
+
+ vocab = collections.OrderedDict()
+ raw_vocab = collections.OrderedDict()
+ ids_to_tokens = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as f:
+ token = f.readlines()
+ token = [[t.rstrip("\n")] if (t == ",\n" or "," not in t) else t.rstrip("\n").split(",") for t in token]
+ for idx, b in enumerate(token):
+ ids_to_tokens[idx] = b
+ raw_vocab[",".join(b)] = idx
+ for wd in b:
+ vocab[wd] = idx
+
+ return vocab, raw_vocab, ids_to_tokens, emoji
+
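+
+# Hedged example (not part of the original module): `load_vocab_and_emoji` expects one vocabulary
+# entry per line, and a line may list several comma-separated surface forms that all map to the
+# same id. The file contents below are invented purely for illustration.
+def _example_load_vocab_and_emoji():
+    import tempfile
+
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        vocab_path = os.path.join(tmp_dir, "vocab.txt")
+        emoji_path = os.path.join(tmp_dir, "emoji.json")
+        with open(vocab_path, "w", encoding="utf-8") as f:
+            f.write("こんにちは\n慶応,慶應\n<|byte0|>\n")
+        with open(emoji_path, "w", encoding="utf-8") as f:
+            json.dump({"emoji": {}, "emoji_inv": {}}, f)
+
+        vocab, raw_vocab, ids_to_tokens, _ = load_vocab_and_emoji(vocab_path, emoji_path)
+        assert vocab["慶応"] == vocab["慶應"] == 1  # every surface form on a line shares one id
+        assert raw_vocab["慶応,慶應"] == 1  # raw_vocab is keyed by the joined line
+        assert ids_to_tokens[1] == ["慶応", "慶應"]  # ids_to_tokens keeps all surface forms
+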
+
+class GPTSanJapaneseTokenizer(PreTrainedTokenizer):
+ """
+    This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications:
+    - Decoding byte0~byte255 tokens correctly
+    - Added bagofword token handling
+    - Return token_type_ids for Prefix-LM model
+    The bagofword token represents a repetition of the previous token and is converted to 3 consecutive tokens when
+    decoding. In addition, the original Japanese special Sub-Word-Encoding has been released in this repository
+ (https://github.com/tanreinama/Japanese-BPEEncoder_V2). The token_type_ids is a mask indicating the prefix input
+ position of the Prefix-LM model. To specify a prefix position, specify a prefix input for prefix_text, or specify a
+ sentence of the prefix part and the part after it as a text pair of batch input.
+
+ Example:
+
+ ```python
+ >>> from transformers import GPTSanJapaneseTokenizer
+
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> # You can confirm both 慶応 and 慶應 are encoded to 17750
+ >>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]
+ [35993, 35998, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
+
+ >>> # Both 慶応 and 慶應 are decoded to 慶応
+ >>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"])
+ '吾輩は猫である🐯。実は慶応(慶応)大学出身'
+ ```
+
+ Example for Prefix-LM:
+
+ ```python
+ >>> from transformers import GPTSanJapaneseTokenizer
+
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["input_ids"]
+ [35993, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 35998, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
+
+ >>> # Mask for Prefix-LM inputs
+ >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["token_type_ids"]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ ```
+
+ Example for batch encode:
+
+ ```python
+ >>> from transformers import GPTSanJapaneseTokenizer
+
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"]
+ [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
+
+ >>> # Mask for Prefix-LM inputs
+ >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"]
+ [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
+
+ >>> # Mask for padding
+ >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"]
+ [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
+ ```
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ emoji_file (`str`):
+ File containing the emoji.
+        unk_token (`str`, *optional*, defaults to `"<|nottoken|>"`):
+            The token used for an unknown character.
+        pad_token (`str`, *optional*, defaults to `"<|separator|>"`):
+            The token used for padding.
+ bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
+ The beginning of sequence token.
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The end of sequence token.
+ sep_token (`str`, *optional*, defaults to `"<|segmenter|>"`):
+            A special token that separates the prefix part from the general input part.
+ do_clean_text (`bool`, *optional*, defaults to `False`):
+ Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
+
+ def __init__(
+ self,
+ vocab_file,
+ emoji_file,
+ unk_token="<|nottoken|>",
+ pad_token="<|separator|>",
+ bos_token="<|startoftext|>",
+ eos_token="<|endoftext|>",
+ sep_token="<|segmenter|>",
+ do_clean_text=False,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ if not os.path.isfile(emoji_file):
+ raise ValueError(
+ f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
+ " pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.do_clean_text = do_clean_text
+ self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
+ self.subword_tokenizer = SubWordJapaneseTokenizer(
+ vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
+ )
+
+ super().__init__(
+ unk_token=unk_token,
+ pad_token=pad_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ do_clean_text=do_clean_text,
+ **kwargs,
+ )
+
+ @property
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.vocab_size
+ def vocab_size(self):
+        # self.vocab supports character-form variants unique to Japanese, so it contains more entries than the raw vocabulary
+ return len(self.raw_vocab)
+
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.get_vocab
+ def get_vocab(self):
+ return dict(self.raw_vocab, **self.added_tokens_encoder)
+
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._tokenize
+ def _tokenize(self, text):
+ return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
+
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.subword_tokenizer.convert_id_to_token(index)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ words = []
+ byte_tokens = []
+ for word in tokens:
+ if word[:6] == "<|byte" and word[-2:] == "|>":
+ byte_tokens.append(int(word[6:-2]))
+ else:
+ if len(byte_tokens) > 0:
+ words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
+ byte_tokens = []
+ if word[:7] == "<|emoji" and word[-2:] == "|>":
+ words.append(self.emoji["emoji_inv"][word])
+ elif word == "":
+ words.append(" ")
+ elif word == "
":
+ words.append("\n")
+ elif word == "":
+ words.append("\t")
+ elif word == "":
+ words.append("▀")
+ elif word == "":
+ words.append("ǀ")
+ elif word == "":
+ words.append("‖")
+ elif word == "<|bagoftoken|>":
+ if len(words) > 0:
+ words.append(words[-1])
+ words.append(words[-1])
+ words.append(words[-1])
+ elif word.startswith("<|") and word.endswith("|>"):
+ words.append("")
+ else:
+ words.append(word)
+ if len(byte_tokens) > 0:
+ words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
+ text = "".join(words)
+ return text
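+
+    # Hedged illustration (not part of the original class) of the two less obvious branches above:
+    # consecutive `<|byteNN|>` tokens are buffered and decoded as a single UTF-8 sequence, and
+    # `<|bagoftoken|>` repeats the previous word three more times. The byte values below are
+    # invented for illustration and decode to the single character "に".
+    @staticmethod
+    def _example_byte_token_decoding():
+        byte_ids = [227, 129, 171]  # <|byte227|>, <|byte129|>, <|byte171|>
+        assert bytearray(byte_ids).decode("utf-8", errors="replace") == "に"
+        # Followed by "<|bagoftoken|>", convert_tokens_to_string would emit "に" plus three
+        # repetitions of it, i.e. "にににに".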
+
+ @property
+ def default_chat_template(self):
+ """
+ A simple chat template that adds standard BOS, SEP and EOS tokens between messages while discarding role
+ information.
+ """
+ logger.warning_once(
+ "\nNo chat template is defined for this tokenizer - using the default template "
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+ )
+ return (
+ "{% for message in messages %}"
+ "{% if not loop.first %}{{ bos_token}}{% endif %}"
+ "{{ sep_token }}{{ message.content }} {{ eos_token }}"
+ "{% endfor %}"
+ )
+
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ emoji_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
+ )
+ else:
+ vocab_file = (
+ (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ emoji_file = (
+ (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
+ )
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token_index, token in self.ids_to_tokens.items():
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(",".join(token) + "\n")
+ index += 1
+ with open(emoji_file, "w", encoding="utf-8") as writer:
+ json.dump(self.emoji, writer)
+ return vocab_file, emoji_file
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ # docstyle-ignore
+ """
+ The tokenizer returns token_type_ids as separators between the Prefix part and the rest.
+ token_type_ids is 1 for the Prefix part and 0 for the rest of the token.
+
+ Example:
+ ```python
+ >>> from transformers import GPTSanJapaneseTokenizer
+
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> x_token = tokenizer("アイウエ")
+ >>> # input_ids: | SOT | SEG | ア | イ | ウ | エ |
+ >>> # token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 |
+
+ >>> x_token = tokenizer("", prefix_text="アイウエ")
+ >>> # input_ids: | SOT | ア | イ | ウ | エ | SEG |
+ >>> # token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 |
+
+ >>> x_token = tokenizer("ウエ", prefix_text="アイ")
+ >>> # input_ids: | SOT | ア | イ | SEG | ウ | エ |
+ >>> # token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 |
+ ```"""
+ prefix_len = 0
+ if self.sep_token in self.vocab:
+ segid = self.vocab[self.sep_token]
+ if segid in token_ids_0:
+ prefix_len = token_ids_0.index(segid)
+ if token_ids_1 is None:
+ total_len = len(token_ids_0)
+ else:
+ total_len = len(token_ids_0 + token_ids_1)
+ return prefix_len * [1] + (total_len - prefix_len) * [0]
+
+ def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs):
+ # GPTSAN inserts extra SEP tokens in Prefix-LM in addition to SOT for text generation.
+ # SOT at the beginning of the text, and SEP at the separator between the Prefix part and the rest.
+ if add_sep_token is None:
+            add_sep_token = self.sep_token not in text  # add a SEP only if the text does not already mark the un-prefixed position
+ prepared = self.bos_token if self.bos_token in self.vocab else ""
+ prepared += prefix_text if prefix_text is not None else ""
+ if add_sep_token:
+ prepared += self.sep_token if self.sep_token in self.vocab else ""
+ prepared += text
+ return (prepared, kwargs)
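+
+    # Hedged illustration (not part of the original class): assuming both the BOS and SEP tokens
+    # exist in the vocabulary and `add_sep_token` resolves to True, prepare_for_tokenization simply
+    # concatenates BOS + prefix_text + SEP + text; create_token_type_ids_from_sequences later uses
+    # the SEP position to build the 1/0 prefix mask.
+    @staticmethod
+    def _example_prefix_lm_assembly(text, prefix_text="", bos="<|startoftext|>", sep="<|segmenter|>"):
+        return bos + prefix_text + sep + text
+
+    # e.g. _example_prefix_lm_assembly("ウエ", "アイ") == "<|startoftext|>アイ<|segmenter|>ウエ"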
+
+ def _batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
+ ],
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ is_split_into_words: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ ) -> BatchEncoding:
+ # This tokenizer converts input text pairs into Prefix input and subsequent input
+ if isinstance(batch_text_or_text_pairs[0], tuple) or isinstance(tuple(batch_text_or_text_pairs[0]), list):
+ # As a single text with an explicit un-prefix position
+ batch_prefix_texts = []
+ for pref, txt in batch_text_or_text_pairs:
+ batch_prefix_texts.append(pref + self.sep_token + txt)
+ batch_text_or_text_pairs = batch_prefix_texts
+
+ return super()._batch_encode_plus(
+ batch_text_or_text_pairs,
+ add_special_tokens,
+ padding_strategy,
+ truncation_strategy,
+ max_length,
+ stride,
+ is_split_into_words,
+ pad_to_multiple_of,
+ return_tensors,
+ return_token_type_ids,
+ return_attention_mask,
+ return_overflowing_tokens,
+ return_special_tokens_mask,
+ return_offsets_mapping,
+ return_length,
+ verbose,
+ )
+
+
+class SubWordJapaneseTokenizer(object):
+ """
+    This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications:
+ - Decoding byte0~byte255 tokens correctly
+ - Added bagofword token handling
+
+    https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is released under the MIT License,
+    according to the original repository.
+
+ MIT License
+
+ Copyright (c) 2020 tanreinama
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+ documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of
+ the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+ THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ """
+
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__init__
+ def __init__(self, vocab, ids_to_tokens, emoji):
+ self.vocab = vocab # same as swe
+ self.ids_to_tokens = ids_to_tokens # same as bpe
+ self.emoji = emoji
+ self.maxlen = np.max([len(w) for w in self.vocab.keys()])
+ self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
+ self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
+ self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
+ self.content_repatter4 = re.compile(
+ r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
+ )
+ self.content_repatter5 = re.compile(
+ r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
+ )
+ self.content_repatter6 = re.compile(
+ r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
+ )
+ keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
+ blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
+        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
+
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__len__
+ def __len__(self):
+ return len(self.ids_to_tokens)
+
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.clean_text
+ def clean_text(self, content):
+        content = self.content_repatter1.sub("<URL>", content)
+        content = self.content_repatter2.sub("<EMAIL>", content)
+        content = self.content_repatter3.sub("<TEL>", content)
+        content = self.content_repatter4.sub("<DATE>", content)
+        content = self.content_repatter5.sub("<DATE>", content)
+        content = self.content_repatter6.sub("<PRICE>", content)
+        content = content.translate(self.content_trans1)
+        while "<BLOCK><BLOCK>" in content:
+            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
+ return content
+
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.tokenize
+ def tokenize(self, text, clean=False):
+ text = text.replace(" ", "")
+ text = text.replace(" ", "")
+ text = text.replace("\r\n", "
")
+ text = text.replace("\n", "
")
+ text = text.replace("\r", "
")
+ text = text.replace("\t", "")
+ text = text.replace("—", "ー")
+ text = text.replace("−", "ー")
+ for k, v in self.emoji["emoji"].items():
+ if k in text:
+ text = text.replace(k, v)
+ if clean:
+ text = self.clean_text(text)
+
+ def check_simbol(x):
+ e = x.encode()
+ if len(x) == 1 and len(e) == 2:
+ c = (int(e[0]) << 8) + int(e[1])
+ if (
+ (c >= 0xC2A1 and c <= 0xC2BF)
+ or (c >= 0xC780 and c <= 0xC783)
+ or (c >= 0xCAB9 and c <= 0xCBBF)
+ or (c >= 0xCC80 and c <= 0xCDA2)
+ ):
+ return True
+ return False
+
+ def checku2e(x):
+ e = x.encode()
+ if len(x) == 1 and len(e) == 3:
+ c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
+ if c >= 0xE28080 and c <= 0xE2B07F:
+ return True
+ return False
+
+ pos = 0
+ result = []
+ while pos < len(text):
+ end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
+ candidates = [] # (token_id, token, pos)
+ for e in range(end, pos, -1):
+ wd = text[pos:e]
+ if wd in self.vocab:
+ if wd[0] == "<" and len(wd) > 2:
+ candidates = [(self.vocab[wd], wd, e)]
+ break
+ else:
+ candidates.append((self.vocab[wd], wd, e))
+ if len(candidates) > 0:
+ # the smallest token_id is adopted
+ _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
+ result.append(wd)
+ pos = e
+ else:
+ end = pos + 1
+ wd = text[pos:end]
+                if check_simbol(wd):
+                    result.append("<KIGOU>")
+                elif checku2e(wd):
+                    result.append("<U2000U2BFF>")
+ else:
+ for i in wd.encode("utf-8"):
+ result.append("<|byte%d|>" % i)
+ pos = end
+ return result
+
+ def convert_id_to_token(self, index):
+ return self.ids_to_tokens[index][0]
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..984f49973d1b3d5817be05b61b9a1983965ec9da
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..964ec48654a7fbfa7d19576b123a21b26a5ec469
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/configuration_groupvit.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/convert_groupvit_nvlab_to_hf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/convert_groupvit_nvlab_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1725b00237ddaef66a5cc63ae23d640f9166b458
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/convert_groupvit_nvlab_to_hf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_groupvit.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_groupvit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9d6069644dc5f6b187bf5697d5cef14c24be192
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_groupvit.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_tf_groupvit.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_tf_groupvit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fac93734e330a45ee5b6a2aa8b4bc6084075e2da
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__pycache__/modeling_tf_groupvit.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..c99c96ec87f89dcd9a9f080186ade7388494dfcb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py
@@ -0,0 +1,1586 @@
+# coding=utf-8
+# Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch GroupViT model."""
+
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc"
+
+GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "nvidia/groupvit-gcc-yfcc",
+ # See all GroupViT models at https://huggingface.co/models?filter=groupvit
+]
+
+
+# contrastive loss function, adapted from
+# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
+def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
+
+
+# Copied from transformers.models.clip.modeling_clip.clip_loss with clip->groupvit
+def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor:
+ caption_loss = contrastive_loss(similarity)
+ image_loss = contrastive_loss(similarity.t())
+ return (caption_loss + image_loss) / 2.0
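+
+
+# Hedged sketch (not part of the original module): for a batch of matched text/image pairs, the
+# target for row i of the similarity matrix is index i, and `groupvit_loss` averages the
+# text->image and image->text cross-entropies. The batch size below is illustrative.
+def _example_groupvit_loss():
+    logits_per_text = torch.randn(4, 4)  # similarity logits for 4 matched pairs
+    loss = groupvit_loss(logits_per_text)
+    assert loss.ndim == 0  # a single scalar combining both directions
+    return loss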
+
+
+def hard_softmax(logits: torch.Tensor, dim: int):
+ y_soft = logits.softmax(dim)
+ # Straight through.
+ index = y_soft.max(dim, keepdim=True)[1]
+ y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
+ ret = y_hard - y_soft.detach() + y_soft
+
+ return ret
+
+
+def gumbel_softmax(logits: torch.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> torch.Tensor:
+ # more stable https://github.com/pytorch/pytorch/issues/41663
+ gumbel_dist = torch.distributions.gumbel.Gumbel(
+ torch.tensor(0.0, device=logits.device, dtype=logits.dtype),
+ torch.tensor(1.0, device=logits.device, dtype=logits.dtype),
+ )
+ gumbels = gumbel_dist.sample(logits.shape)
+
+ gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
+ y_soft = gumbels.softmax(dim)
+
+ if hard:
+ # Straight through.
+ index = y_soft.max(dim, keepdim=True)[1]
+ y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
+ ret = y_hard - y_soft.detach() + y_soft
+ else:
+ # Reparametrization trick.
+ ret = y_soft
+ return ret
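+
+
+# Hedged sketch (not part of the original module) of the straight-through behaviour implemented
+# above: with `hard=True` the forward value is exactly one-hot, while gradients still reach the
+# logits through the underlying soft sample.
+def _example_straight_through_gumbel():
+    logits = torch.randn(2, 5, requires_grad=True)
+    sample = gumbel_softmax(logits, tau=1.0, hard=True, dim=-1)
+    assert torch.all(sample.sum(dim=-1) == 1)  # each row is one-hot in the forward pass
+    downstream = (sample * torch.randn(2, 5)).sum()  # any differentiable use of the sample
+    downstream.backward()
+    assert logits.grad is not None  # gradients reach the logits via the soft relaxation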
+
+
+def resize_attention_map(attentions, height, width, align_corners=False):
+ """
+ Args:
+ attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]
+ height (`int`): height of the output attention map
+ width (`int`): width of the output attention map
+        align_corners (`bool`, *optional*): the `align_corners` argument for `nn.functional.interpolate`.
+
+ Returns:
+ `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width]
+ """
+
+ scale = (height * width // attentions.shape[2]) ** 0.5
+ if height > width:
+ feat_width = int(np.round(width / scale))
+ feat_height = attentions.shape[2] // feat_width
+ else:
+ feat_height = int(np.round(height / scale))
+ feat_width = attentions.shape[2] // feat_height
+
+ batch_size = attentions.shape[0]
+ groups = attentions.shape[1] # number of group token
+    # [batch_size, groups, feat_height*feat_width] -> [batch_size, groups, height, width]
+ attentions = attentions.reshape(batch_size, groups, feat_height, feat_width)
+ attentions = nn.functional.interpolate(
+ attentions, size=(height, width), mode="bilinear", align_corners=align_corners
+ )
+ return attentions
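+
+
+# Hedged shape sketch (not part of the original module): a [batch_size, groups, feat_height*feat_width]
+# attention map is reshaped back to its 2-D feature grid and bilinearly upsampled to the requested
+# output resolution. The sizes below are illustrative only.
+def _example_resize_attention_map():
+    attentions = torch.rand(2, 8, 14 * 14)  # batch of 2, 8 group tokens, 14x14 feature grid
+    resized = resize_attention_map(attentions, height=224, width=224)
+    assert resized.shape == (2, 8, 224, 224)
+    return resized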
+
+
+def get_grouping_from_attentions(attentions, hw_shape):
+ """
+ Args:
+        attentions (`tuple(torch.FloatTensor)`): tuple of attention maps returned by `GroupViTVisionTransformer`
+ hw_shape (`tuple(int)`): height and width of the output attention map
+ Returns:
+ `torch.Tensor`: the attention map of shape [batch_size, groups, height, width]
+ """
+
+ attn_maps = []
+ with torch.no_grad():
+ prev_attn_masks = None
+ for attn_masks in attentions:
+ # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups]
+ attn_masks = attn_masks.permute(0, 2, 1).contiguous()
+ if prev_attn_masks is None:
+ prev_attn_masks = attn_masks
+ else:
+ prev_attn_masks = prev_attn_masks @ attn_masks
+ # [batch_size, heightxwidth, num_groups] -> [batch_size, num_groups, heightxwidth] -> [batch_size, num_groups, height, width]
+ cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape)
+ attn_maps.append(cur_attn_map)
+
+ # [batch_size, num_groups, height, width]
+ final_grouping = attn_maps[-1]
+
+ return final_grouping
+
+
+class GroupViTCrossAttentionLayer(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__()
+ self.attn = GroupViTAttention(config)
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.mlp = GroupViTMLP(config)
+ self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, query, key):
+ x = query
+ x = x + self.attn(query, encoder_hidden_states=key)[0]
+ x = x + self.mlp(self.norm2(x))
+ x = self.norm_post(x)
+ return x
+
+
+class GroupViTAssignAttention(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__()
+ self.scale = config.hidden_size**-0.5
+
+ self.q_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.k_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.v_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.assign_eps = config.assign_eps
+
+ def get_attn(self, attn, gumbel=True, hard=True):
+ if gumbel and self.training:
+ attn = gumbel_softmax(attn, dim=-2, hard=hard)
+ else:
+ if hard:
+ attn = hard_softmax(attn, dim=-2)
+ else:
+ attn = nn.functional.softmax(attn, dim=-2)
+
+ return attn
+
+ def forward(self, query, key):
+ value = key
+ # [batch_size, query_length, channels]
+ query = self.q_proj(query)
+
+ # [batch_size, key_length, channels]
+ key = self.k_proj(key)
+
+ # [batch_size, key_length, channels]
+ value = self.v_proj(value)
+
+ # [batch_size, query_length, key_length]
+ raw_attn = (query @ key.transpose(-2, -1)) * self.scale
+
+ attn = self.get_attn(raw_attn)
+ soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False)
+
+ attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps)
+
+ out = attn @ value
+
+ out = self.proj(out)
+
+ return out, soft_attn
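+
+
+# Hedged shape sketch (not part of the original module): group tokens act as queries and image
+# tokens as keys/values, and the module returns the updated group features together with the soft
+# assignment over image tokens. The default `GroupViTVisionConfig` and the token counts below are
+# illustrative assumptions.
+def _example_assign_attention_shapes():
+    config = GroupViTVisionConfig()
+    assign = GroupViTAssignAttention(config).eval()  # eval() takes the hard, non-Gumbel path
+    group_tokens = torch.randn(1, 8, config.hidden_size)
+    image_tokens = torch.randn(1, 196, config.hidden_size)
+    out, soft_attention = assign(group_tokens, image_tokens)
+    assert out.shape == (1, 8, config.hidden_size)
+    assert soft_attention.shape == (1, 8, 196)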
+
+
+class GroupViTTokenAssign(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group):
+ super().__init__()
+ self.num_output_group = num_output_group
+ # norm on group_tokens
+ self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ assign_mlp_ratio = (
+ config.assign_mlp_ratio
+ if isinstance(config.assign_mlp_ratio, collections.abc.Iterable)
+ else (config.assign_mlp_ratio, config.assign_mlp_ratio)
+ )
+ tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio]
+ self.mlp_inter = GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group)
+ self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ # norm on x
+ self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.pre_assign_attn = GroupViTCrossAttentionLayer(config)
+
+ self.assign = GroupViTAssignAttention(config)
+ self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size)
+
+ def project_group_token(self, group_tokens):
+ """
+ Args:
+ group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels]
+
+ Returns:
+ projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]
+ """
+ # [B, num_output_groups, C] <- [B, num_group_tokens, C]
+ projected_group_tokens = self.mlp_inter(group_tokens)
+ projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
+ return projected_group_tokens
+
+ def forward(self, image_tokens, group_tokens):
+ """
+ Args:
+ image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels]
+ group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
+ """
+
+ group_tokens = self.norm_tokens(group_tokens)
+ image_tokens = self.norm_x(image_tokens)
+ # [batch_size, num_output_groups, channels]
+ projected_group_tokens = self.project_group_token(group_tokens)
+ projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)
+ new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)
+ new_image_tokens += projected_group_tokens
+
+ new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))
+
+ return new_image_tokens, attention
+
+
+@dataclass
+class GroupViTModelOutput(ModelOutput):
+ """
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for image-text similarity.
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
+ similarity scores.
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
+ similarity scores.
+ segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
+ Classification scores for each pixel.
+
+            <Tip warning={true}>
+
+            The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
+            to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the
+            original image size as post-processing. You should always check your logits shape and resize as needed.
+
+            </Tip>
+
+        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+            The text embeddings obtained by applying the projection layer to the pooled output of
+            [`GroupViTTextModel`].
+        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+            The image embeddings obtained by applying the projection layer to the pooled output of
+            [`GroupViTVisionModel`].
+ text_model_output (`BaseModelOutputWithPooling`):
+ The output of the [`GroupViTTextModel`].
+ vision_model_output (`BaseModelOutputWithPooling`):
+ The output of the [`GroupViTVisionModel`].
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits_per_image: torch.FloatTensor = None
+ logits_per_text: torch.FloatTensor = None
+ segmentation_logits: torch.FloatTensor = None
+ text_embeds: torch.FloatTensor = None
+ image_embeds: torch.FloatTensor = None
+ text_model_output: BaseModelOutputWithPooling = None
+ vision_model_output: BaseModelOutputWithPooling = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+class GroupViTPatchEmbeddings(nn.Module):
+ """
+ Image to Patch Embedding.
+ """
+
+ def __init__(
+ self,
+ image_size: int = 224,
+ patch_size: Union[int, Tuple[int, int]] = 16,
+ num_channels: int = 3,
+ embed_dim: int = 768,
+ ):
+ super().__init__()
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if not interpolate_pos_encoding:
+ if height != self.image_size[0] or width != self.image_size[1]:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model"
+ f" ({self.image_size[0]}*{self.image_size[1]})."
+ )
+ x = self.projection(pixel_values).flatten(2).transpose(1, 2)
+ return x
+
+
+class GroupViTVisionEmbeddings(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__()
+
+ self.patch_embeddings = GroupViTPatchEmbeddings(
+ image_size=config.image_size,
+ patch_size=config.patch_size,
+ num_channels=config.num_channels,
+ embed_dim=config.hidden_size,
+ )
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size))
+ self.dropout = nn.Dropout(config.dropout)
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.config = config
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+        This method allows interpolating the pre-trained position encodings so that the model can be used on
+        higher-resolution images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
+ npatch = embeddings.shape[1]
+ if npatch == self.position_embeddings.shape[1] and height == width:
+ return self.position_embeddings
+ patch_pos_embed = self.position_embeddings
+ num_original_pos_embed = patch_pos_embed.shape[1]
+ dim = embeddings.shape[-1]
+ feat_height = height // self.config.patch_size
+ feat_width = width // self.config.patch_size
+ # we add a small number to avoid floating point error in the interpolation
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
+ feat_height, feat_width = feat_height + 0.1, feat_width + 0.1
+ original_height = original_width = math.sqrt(num_original_pos_embed)
+ reshaped_patch_pos_embed = patch_pos_embed.reshape(1, int(original_height), int(original_width), dim).permute(
+ 0, 3, 1, 2
+ )
+ scale_factor = (feat_height / original_height, feat_width / original_width)
+ patch_pos_embed = nn.functional.interpolate(
+ reshaped_patch_pos_embed,
+ scale_factor=scale_factor,
+ mode="bicubic",
+ align_corners=False,
+ )
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+ return patch_pos_embed
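+
+    # Hedged sketch (not part of the original class): when the input resolution differs from the
+    # training resolution, the pre-trained position embeddings are resized on the fly. The default
+    # `GroupViTVisionConfig` (assumed here to use 224x224 images with 16-pixel patches) and the
+    # 256x256 input are illustrative assumptions.
+    @staticmethod
+    def _example_interpolate_pos_encoding():
+        config = GroupViTVisionConfig()
+        embeddings_module = GroupViTVisionEmbeddings(config)
+        # fake patch embeddings shaped like those of a 256x256 input
+        patch_embeddings = torch.randn(1, (256 // config.patch_size) ** 2, config.hidden_size)
+        resized = embeddings_module.interpolate_pos_encoding(patch_embeddings, height=256, width=256)
+        assert resized.shape[0] == 1 and resized.shape[-1] == config.hidden_size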
+
+ def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+
+ embeddings = self.layernorm(embeddings)
+
+ batch_size, seq_len, _ = embeddings.size()
+
+ # add positional encoding to each token
+ if interpolate_pos_encoding:
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ embeddings = embeddings + self.position_embeddings
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->GroupViT
+class GroupViTTextEmbeddings(nn.Module):
+ def __init__(self, config: GroupViTTextConfig):
+ super().__init__()
+ embed_dim = config.hidden_size
+
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ ) -> torch.Tensor:
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ if inputs_embeds is None:
+ inputs_embeds = self.token_embedding(input_ids)
+
+ position_embeddings = self.position_embedding(position_ids)
+ embeddings = inputs_embeds + position_embeddings
+
+ return embeddings
+
+
+class GroupViTStage(nn.Module):
+ """This corresponds to the `GroupingLayer` class in the GroupViT implementation."""
+
+ def __init__(
+ self,
+ config: GroupViTVisionConfig,
+ depth: int,
+ num_prev_group_token: int,
+ num_group_token: int,
+ num_output_group: int,
+ ):
+ super().__init__()
+ self.depth = depth
+ self.num_group_token = num_group_token
+ if num_group_token > 0:
+ self.group_token = nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size))
+ else:
+ self.group_token = None
+ self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)])
+
+ if num_group_token > 0:
+ self.downsample = GroupViTTokenAssign(
+ config=config,
+ num_group_token=num_group_token,
+ num_output_group=num_output_group,
+ )
+ else:
+ self.downsample = None
+
+ if num_prev_group_token > 0 and num_group_token > 0:
+ self.group_projector = nn.Sequential(
+ nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps),
+ GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token),
+ )
+ else:
+ self.group_projector = None
+
+ @property
+ def with_group_token(self):
+ return self.group_token is not None
+
+ def split_x(self, x):
+ if self.with_group_token:
+ return x[:, : -self.num_group_token], x[:, -self.num_group_token :]
+ else:
+ return x, None
+
+ def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor] = None) -> torch.Tensor:
+ if group_token is None:
+ return x
+ return torch.cat([x, group_token], dim=1)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ prev_group_token: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+            prev_group_token (`torch.FloatTensor`, *optional*): group tokens from the previous stage, of shape
+                `(batch, num_prev_group_token, embed_dim)`, used to condition the group tokens of this stage.
+            output_attentions (`bool`, *optional*):
+                Whether or not to return the grouping tensors of the grouping block.
+ """
+ if self.with_group_token:
+ group_token = self.group_token.expand(hidden_states.size(0), -1, -1)
+ if self.group_projector is not None:
+ group_token = group_token + self.group_projector(prev_group_token)
+ else:
+ group_token = None
+
+ x = hidden_states
+
+ cat_x = self.concat_x(x, group_token)
+ for layer in self.layers:
+ layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None)
+ cat_x = layer_out[0]
+
+ x, group_token = self.split_x(cat_x)
+
+ attention = None
+ if self.downsample is not None:
+ x, attention = self.downsample(x, group_token)
+
+ outputs = (x, group_token)
+ if output_attentions:
+ outputs = outputs + (attention,)
+
+ return outputs
+
+
+class GroupViTMLP(nn.Module):
+ def __init__(
+ self,
+ config: GroupViTVisionConfig,
+ hidden_size: Optional[int] = None,
+ intermediate_size: Optional[int] = None,
+ output_size: Optional[int] = None,
+ ):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ hidden_size = hidden_size if hidden_size is not None else config.hidden_size
+ intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
+ output_size = output_size if output_size is not None else hidden_size
+ self.fc1 = nn.Linear(hidden_size, intermediate_size)
+ self.fc2 = nn.Linear(intermediate_size, output_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+class GroupViTMixerMLP(GroupViTMLP):
+ def forward(self, x):
+ x = super().forward(x.transpose(1, 2))
+ return x.transpose(1, 2)
+
+
+class GroupViTAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = config.attention_dropout
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+ is_cross_attention = encoder_hidden_states is not None
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scale
+ if is_cross_attention:
+ key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
+ else:
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ # apply the causal_attention_mask first
+ if causal_attention_mask is not None:
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {causal_attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+            # this operation is a bit awkward, but it's required to
+            # make sure that attn_weights keeps its gradient.
+            # In order to do so, attn_weights has to be reshaped
+            # twice and reused in the following computation
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GroupViT
+class GroupViTEncoderLayer(nn.Module):
+ def __init__(self, config: GroupViTConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = GroupViTAttention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = GroupViTMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ causal_attention_mask: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ `(config.encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class GroupViTPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GroupViTConfig
+ base_model_prefix = "groupvit"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+
+ init_range = self.config.initializer_range
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=init_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ factor = self.config.initializer_factor
+ if isinstance(module, GroupViTTextEmbeddings):
+ module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
+ module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
+ elif isinstance(module, GroupViTAttention):
+ factor = self.config.initializer_factor
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
+ out_proj_std = (module.embed_dim**-0.5) * factor
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
+ elif isinstance(module, GroupViTMLP):
+ factor = self.config.initializer_factor
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
+ nn.init.normal_(module.fc1.weight, std=fc_std)
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
+
+
+GROUPVIT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+    as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+    behavior.
+
+ Parameters:
+ config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GROUPVIT_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+GROUPVIT_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+GROUPVIT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`CLIPImageProcessor.__call__`] for details.
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class GroupViTVisionEncoder(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.stages = nn.ModuleList(
+ [
+ GroupViTStage(
+ config=config,
+ depth=config.depths[i],
+ num_group_token=config.num_group_tokens[i],
+ num_output_group=config.num_output_groups[i],
+ num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0,
+ )
+ for i in range(len(config.depths))
+ ]
+ )
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ all_hidden_states = () if output_hidden_states else None
+ all_groupings = () if output_attentions else None
+
+ group_tokens = None
+
+ for i, stage in enumerate(self.stages):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = stage(hidden_states, group_tokens, output_attentions)
+
+ hidden_states = layer_outputs[0]
+ group_tokens = layer_outputs[1]
+
+ if output_attentions and layer_outputs[2] is not None:
+ all_groupings = all_groupings + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings
+ )
+
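+ # A rough shape sketch for the stages above (illustrative only, based on the `GroupViTVisionConfig`
+ # fields used in `__init__`): stage i consumes the current tokens together with
+ # `config.num_group_tokens[i]` learned group tokens and emits `config.num_output_groups[i]`
+ # grouped tokens, so the token count shrinks from (batch, num_patches, hidden) at stage 0 down to
+ # (batch, num_output_groups[-1], hidden) after the final stage.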
+
+class GroupViTTextEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
+ [`GroupViTEncoderLayer`].
+
+ Args:
+ config: GroupViTTextConfig
+ """
+
+ def __init__(self, config: GroupViTTextConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder, CLIP_TEXT->GROUPVIT_TEXT
+class GroupViTTextTransformer(nn.Module):
+ def __init__(self, config: GroupViTTextConfig):
+ super().__init__()
+ self.config = config
+ embed_dim = config.hidden_size
+ self.embeddings = GroupViTTextEmbeddings(config)
+ self.encoder = GroupViTTextEncoder(config)
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ # For `pooled_output` computation
+ self.eos_token_id = config.eos_token_id
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None:
+ raise ValueError("You have to specify input_ids")
+
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
+
+ # CLIP's text model uses causal mask, prepare it here.
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
+ causal_attention_mask = _create_4d_causal_attention_mask(
+ input_shape, hidden_states.dtype, device=hidden_states.device
+ )
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
+
+ if self.eos_token_id == 2:
+ # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
+ # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
+ # ------------------------------------------------------------
+ # text_embeds.shape = [batch_size, sequence_length, transformer.width]
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
+ # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
+ pooled_output = last_hidden_state[
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
+ input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
+ ]
+ else:
+ # The config gets the updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
+ pooled_output = last_hidden_state[
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
+ # We need to get the first position of the `eos_token_id` value (`pad_token_id` might be equal to `eos_token_id`)
+ (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
+ .int()
+ .argmax(dim=-1),
+ ]
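+ # e.g. for a row ending in [..., eos_token_id, pad, pad] this selects the hidden state at the
+ # first `eos_token_id` position, so `pooled_output` has shape (batch_size, hidden_size)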
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class GroupViTTextModel(GroupViTPreTrainedModel):
+ config_class = GroupViTTextConfig
+
+ def __init__(self, config: GroupViTTextConfig):
+ super().__init__(config)
+ self.text_model = GroupViTTextTransformer(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.text_model.embeddings.token_embedding
+
+ def set_input_embeddings(self, value):
+ self.text_model.embeddings.token_embedding = value
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import CLIPTokenizer, GroupViTTextModel
+
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
+ ```"""
+ return self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+class GroupViTVisionTransformer(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__()
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = GroupViTVisionEmbeddings(config)
+ self.encoder = GroupViTVisionEncoder(config)
+ self.layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ hidden_states=hidden_states,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ # normalize the last hidden state
+ last_hidden_state = self.layernorm(last_hidden_state)
+ pooled_output = last_hidden_state.mean(dim=1)
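+ # average over the final group tokens to obtain a single (batch_size, hidden_size) image representation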
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class GroupViTVisionModel(GroupViTPreTrainedModel):
+ config_class = GroupViTVisionConfig
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__(config)
+ self.vision_model = GroupViTVisionTransformer(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> GroupViTPatchEmbeddings:
+ return self.vision_model.embeddings.patch_embeddings
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, GroupViTVisionModel
+
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
+ ```"""
+ return self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+@add_start_docstrings(GROUPVIT_START_DOCSTRING)
+class GroupViTModel(GroupViTPreTrainedModel):
+ config_class = GroupViTConfig
+
+ def __init__(self, config: GroupViTConfig):
+ super().__init__(config)
+
+ if not isinstance(config.text_config, GroupViTTextConfig):
+ raise ValueError(
+ "config.text_config is expected to be of type GroupViTTextConfig but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.vision_config, GroupViTVisionConfig):
+ raise ValueError(
+ "config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
+ f" {type(config.vision_config)}."
+ )
+
+ text_config = config.text_config
+ vision_config = config.vision_config
+
+ self.projection_dim = config.projection_dim
+ self.projection_intermediate_dim = config.projection_intermediate_dim
+ self.text_embed_dim = text_config.hidden_size
+ self.vision_embed_dim = vision_config.hidden_size
+
+ self.text_model = GroupViTTextTransformer(text_config)
+ self.vision_model = GroupViTVisionTransformer(vision_config)
+
+ self.visual_projection = nn.Sequential(
+ nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True),
+ nn.BatchNorm1d(self.projection_intermediate_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
+ )
+ self.text_projection = nn.Sequential(
+ nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True),
+ nn.BatchNorm1d(self.projection_intermediate_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
+ )
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
+ def get_text_features(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
+ applying the projection layer to the pooled output of [`GroupViTTextModel`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import CLIPTokenizer, GroupViTModel
+
+ >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
+ >>> text_features = model.get_text_features(**inputs)
+ ```"""
+ # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = text_outputs[1]
+ text_features = self.text_projection(pooled_output)
+
+ return text_features
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ def get_image_features(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
+ applying the projection layer to the pooled output of [`GroupViTVisionModel`].
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, GroupViTModel
+
+ >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> image_features = model.get_image_features(**inputs)
+ ```"""
+ # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = vision_outputs[1] # pooled_output
+ image_features = self.visual_projection(pooled_output)
+
+ return image_features
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=GroupViTModelOutput, config_class=GroupViTConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_segmentation: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, GroupViTModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, GroupViTModel
+
+ >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
+ ... )
+
+ >>> outputs = model(**inputs)
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
+ ```"""
+ # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_segmentation = (
+ output_segmentation if output_segmentation is not None else self.config.output_segmentation
+ )
+ if output_segmentation:
+ output_attentions = True
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ image_embeds = vision_outputs[1]
+ image_embeds = self.visual_projection(image_embeds)
+
+ text_embeds = text_outputs[1]
+ text_embeds = self.text_projection(text_embeds)
+
+ # normalized features
+ image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
+ text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
+
+ # cosine similarity as logits
+ logit_scale = self.logit_scale.exp()
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
+ logits_per_image = logits_per_text.t()
+
+ seg_logits = None
+ if output_segmentation:
+ # grouped features
+ # [batch_size_image, num_group, hidden_size]
+ image_group_embeds = vision_outputs[0]
+ # [batch_size_image*num_group, hidden_size]
+ image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1]))
+ if output_hidden_states:
+ attentions = vision_outputs[3]
+ else:
+ attentions = vision_outputs[2]
+ # [batch_size_image, num_group, height, width]
+ grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])
+
+ # normalized features
+ image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True)
+ # [batch_size_image x num_group, batch_size_text]
+ logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale
+ # [batch_size_image, batch_size_text, num_group]
+ logits_per_image_group = logits_per_image_group.reshape(
+ image_embeds.shape[0], -1, text_embeds.shape[0]
+ ).permute(0, 2, 1)
+
+ # [batch_size_image, batch_size_text, height x width]
+ flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1)
+
+ # [batch_size_image, batch_size_text, height, width]
+ seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale
+ seg_logits = seg_logits.reshape(
+ seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3]
+ )
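+ # seg_logits: (batch_size_image, batch_size_text, height, width) -- one per-pixel similarity map
+ # between every image and every text prompt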
+
+ loss = None
+ if return_loss:
+ loss = groupvit_loss(logits_per_text)
+
+ if not return_dict:
+ if seg_logits is not None:
+ output = (
+ logits_per_image,
+ logits_per_text,
+ seg_logits,
+ text_embeds,
+ image_embeds,
+ text_outputs,
+ vision_outputs,
+ )
+ else:
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return GroupViTModelOutput(
+ loss=loss,
+ logits_per_image=logits_per_image,
+ logits_per_text=logits_per_text,
+ segmentation_logits=seg_logits,
+ text_embeds=text_embeds,
+ image_embeds=image_embeds,
+ text_model_output=text_outputs,
+ vision_model_output=vision_outputs,
+ )
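+
+ # A minimal usage sketch for the segmentation output (checkpoint and processor as in the docstring
+ # examples above; not executed here):
+ # inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
+ # outputs = model(**inputs, output_segmentation=True)
+ # `outputs.segmentation_logits` has shape (num_images, num_texts, height, width); taking the argmax
+ # over the text dimension yields a per-pixel label map.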
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d202eb4d4234f2f1615cb3ff6eba885532bbeae
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__init__.py
@@ -0,0 +1,145 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_mobilebert": [
+ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "MobileBertConfig",
+ "MobileBertOnnxConfig",
+ ],
+ "tokenization_mobilebert": ["MobileBertTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mobilebert"] = [
+ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MobileBertForMaskedLM",
+ "MobileBertForMultipleChoice",
+ "MobileBertForNextSentencePrediction",
+ "MobileBertForPreTraining",
+ "MobileBertForQuestionAnswering",
+ "MobileBertForSequenceClassification",
+ "MobileBertForTokenClassification",
+ "MobileBertLayer",
+ "MobileBertModel",
+ "MobileBertPreTrainedModel",
+ "load_tf_weights_in_mobilebert",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_mobilebert"] = [
+ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFMobileBertForMaskedLM",
+ "TFMobileBertForMultipleChoice",
+ "TFMobileBertForNextSentencePrediction",
+ "TFMobileBertForPreTraining",
+ "TFMobileBertForQuestionAnswering",
+ "TFMobileBertForSequenceClassification",
+ "TFMobileBertForTokenClassification",
+ "TFMobileBertMainLayer",
+ "TFMobileBertModel",
+ "TFMobileBertPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_mobilebert import (
+ MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ MobileBertConfig,
+ MobileBertOnnxConfig,
+ )
+ from .tokenization_mobilebert import MobileBertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_mobilebert_fast import MobileBertTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mobilebert import (
+ MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ MobileBertForMaskedLM,
+ MobileBertForMultipleChoice,
+ MobileBertForNextSentencePrediction,
+ MobileBertForPreTraining,
+ MobileBertForQuestionAnswering,
+ MobileBertForSequenceClassification,
+ MobileBertForTokenClassification,
+ MobileBertLayer,
+ MobileBertModel,
+ MobileBertPreTrainedModel,
+ load_tf_weights_in_mobilebert,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_mobilebert import (
+ TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFMobileBertForMaskedLM,
+ TFMobileBertForMultipleChoice,
+ TFMobileBertForNextSentencePrediction,
+ TFMobileBertForPreTraining,
+ TFMobileBertForQuestionAnswering,
+ TFMobileBertForSequenceClassification,
+ TFMobileBertForTokenClassification,
+ TFMobileBertMainLayer,
+ TFMobileBertModel,
+ TFMobileBertPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
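+
+ # With the `_LazyModule` indirection above, accessing e.g. `transformers.models.mobilebert.MobileBertModel`
+ # only triggers the import of `modeling_mobilebert` (and hence torch) on first attribute access.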
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5a9ed7f6bc27fc2477920bf0d5513ab94f78e81
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/configuration_mobilebert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/configuration_mobilebert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae3b1d6615d219a96ae28f519316b81f3e612e7e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/configuration_mobilebert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/convert_mobilebert_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/convert_mobilebert_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28caa2820d76879766bc305e8a568dd7a528197c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/convert_mobilebert_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_mobilebert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_mobilebert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a583051a2089c3f67cbea8b08a2fabab33a16fe7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_mobilebert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_tf_mobilebert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_tf_mobilebert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7217ac7e080375ee1da1878f8b67d6b7d7e01d82
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/modeling_tf_mobilebert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36c4197316d2b2fe5f1f6a3e03fe92b86280fbfd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac6561ec8c65ef288ef4cced89909e82f4b1b1b7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/__pycache__/tokenization_mobilebert_fast.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/configuration_mobilebert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/configuration_mobilebert.py
new file mode 100644
index 0000000000000000000000000000000000000000..b14d25ea9ed507974fb9ed12cc12d9a11ec4f9ac
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/configuration_mobilebert.py
@@ -0,0 +1,188 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MobileBERT model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "google/mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/config.json"
+}
+
+
+class MobileBertConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MobileBertModel`] or a [`TFMobileBertModel`]. It
+ is used to instantiate a MobileBERT model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the MobileBERT
+ [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the MobileBERT model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`MobileBertModel`] or [`TFMobileBertModel`].
+ hidden_size (`int`, *optional*, defaults to 512):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 512):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`MobileBertModel`] or
+ [`TFMobileBertModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+
+ pad_token_id (`int`, *optional*, defaults to 0):
+ The ID of the token in the word embedding to use as padding.
+ embedding_size (`int`, *optional*, defaults to 128):
+ The dimension of the word embedding vectors.
+ trigram_input (`bool`, *optional*, defaults to `True`):
+ Whether to use a convolution of trigrams as input.
+ use_bottleneck (`bool`, *optional*, defaults to `True`):
+ Whether to use bottleneck in BERT.
+ intra_bottleneck_size (`int`, *optional*, defaults to 128):
+ Size of bottleneck layer output.
+ use_bottleneck_attention (`bool`, *optional*, defaults to `False`):
+ Whether to use attention inputs from the bottleneck transformation.
+ key_query_shared_bottleneck (`bool`, *optional*, defaults to `True`):
+ Whether to use the same linear transformation for query & key in the bottleneck.
+ num_feedforward_networks (`int`, *optional*, defaults to 4):
+ Number of FFNs in a block.
+ normalization_type (`str`, *optional*, defaults to `"no_norm"`):
+ The normalization type in MobileBERT.
+ classifier_dropout (`float`, *optional*):
+ The dropout ratio for the classification head.
+
+ Examples:
+
+ ```python
+ >>> from transformers import MobileBertConfig, MobileBertModel
+
+ >>> # Initializing a MobileBERT configuration
+ >>> configuration = MobileBertConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration above
+ >>> model = MobileBertModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```
+
+ Attributes:
+ pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained
+ checkpoints.
+ """
+
+ pretrained_config_archive_map = MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
+ model_type = "mobilebert"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=512,
+ num_hidden_layers=24,
+ num_attention_heads=4,
+ intermediate_size=512,
+ hidden_act="relu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ embedding_size=128,
+ trigram_input=True,
+ use_bottleneck=True,
+ intra_bottleneck_size=128,
+ use_bottleneck_attention=False,
+ key_query_shared_bottleneck=True,
+ num_feedforward_networks=4,
+ normalization_type="no_norm",
+ classifier_activation=True,
+ classifier_dropout=None,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.embedding_size = embedding_size
+ self.trigram_input = trigram_input
+ self.use_bottleneck = use_bottleneck
+ self.intra_bottleneck_size = intra_bottleneck_size
+ self.use_bottleneck_attention = use_bottleneck_attention
+ self.key_query_shared_bottleneck = key_query_shared_bottleneck
+ self.num_feedforward_networks = num_feedforward_networks
+ self.normalization_type = normalization_type
+ self.classifier_activation = classifier_activation
+
+ if self.use_bottleneck:
+ self.true_hidden_size = intra_bottleneck_size
+ else:
+ self.true_hidden_size = hidden_size
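+ # roughly, with the defaults (use_bottleneck=True, intra_bottleneck_size=128) the attention/FFN
+ # blocks operate on 128-dimensional hidden states while the embeddings keep hidden_size=512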
+
+ self.classifier_dropout = classifier_dropout
+
+
+# Copied from transformers.models.bert.configuration_bert.BertOnnxConfig with Bert->MobileBert
+class MobileBertOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ("token_type_ids", dynamic_axis),
+ ]
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..022a9d036cdb24558142222a6aec5fd3ed65afd7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,58 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+import torch
+
+from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
+ # Initialise PyTorch model
+ config = MobileBertConfig.from_json_file(mobilebert_config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ model = MobileBertForPreTraining(config)
+ # Load weights from tf checkpoint
+ model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
+ # Save pytorch-model
+ print(f"Save PyTorch model to {pytorch_dump_path}")
+ torch.save(model.state_dict(), pytorch_dump_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
+ )
+ parser.add_argument(
+ "--mobilebert_config_file",
+ default=None,
+ type=str,
+ required=True,
+ help=(
+ "The config json file corresponding to the pre-trained MobileBERT model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
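+
+ # Example invocation (paths are placeholders):
+ # python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
+ # --tf_checkpoint_path /path/to/tf_checkpoint \
+ # --mobilebert_config_file /path/to/mobilebert_config.json \
+ # --pytorch_dump_path /path/to/pytorch_model.bin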
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_mobilebert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_mobilebert.py
new file mode 100644
index 0000000000000000000000000000000000000000..70f2ebc7bfd8f73f8597073c775fb0860e36a469
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_mobilebert.py
@@ -0,0 +1,1617 @@
+# MIT License
+#
+# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import math
+import os
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPooling,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ NextSentencePredictorOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_mobilebert import MobileBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
+_CONFIG_FOR_DOC = "MobileBertConfig"
+
+# TokenClassification docstring
+_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "mrm8488/mobilebert-finetuned-ner"
+_TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']"
+_TOKEN_CLASS_EXPECTED_LOSS = 0.03
+
+# QuestionAnswering docstring
+_CHECKPOINT_FOR_QA = "csarron/mobilebert-uncased-squad-v2"
+_QA_EXPECTED_OUTPUT = "'a nice puppet'"
+_QA_EXPECTED_LOSS = 3.98
+_QA_TARGET_START_INDEX = 12
+_QA_TARGET_END_INDEX = 13
+
+# SequenceClassification docstring
+_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "lordtt13/emo-mobilebert"
+_SEQ_CLASS_EXPECTED_OUTPUT = "'others'"
+_SEQ_CLASS_EXPECTED_LOSS = "4.72"
+
+MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = ["google/mobilebert-uncased"]
+
+
+def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
+ """Load tf checkpoints in a pytorch model."""
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array)
+
+ for name, array in zip(names, arrays):
+ name = name.replace("ffn_layer", "ffn")
+ name = name.replace("FakeLayerNorm", "LayerNorm")
+ name = name.replace("extra_output_weights", "dense/kernel")
+ name = name.replace("bert", "mobilebert")
+ name = name.split("/")
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+ # which are not required for using the pretrained model
+ if any(
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+ for n in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ pointer = model
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+ scope_names = re.split(r"_(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "squad":
+ pointer = getattr(pointer, "classifier")
+ else:
+ try:
+ pointer = getattr(pointer, scope_names[0])
+ except AttributeError:
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+ if m_name[-11:] == "_embeddings":
+ pointer = getattr(pointer, "weight")
+ elif m_name == "kernel":
+ array = np.transpose(array)
+ try:
+ assert (
+ pointer.shape == array.shape
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
+ except AssertionError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+class NoNorm(nn.Module):
+ def __init__(self, feat_size, eps=None):
+ super().__init__()
+ self.bias = nn.Parameter(torch.zeros(feat_size))
+ self.weight = nn.Parameter(torch.ones(feat_size))
+
+ def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
+ return input_tensor * self.weight + self.bias
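+ # NoNorm keeps only the learnable per-feature scale and shift of a LayerNorm, without computing
+ # mean/variance statistics; it is selected via `normalization_type="no_norm"` through NORM2FN below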
+
+
+NORM2FN = {"layer_norm": nn.LayerNorm, "no_norm": NoNorm}
+
+
+class MobileBertEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.trigram_input = config.trigram_input
+ self.embedding_size = config.embedding_size
+ self.hidden_size = config.hidden_size
+
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ embed_dim_multiplier = 3 if self.trigram_input else 1
+ embedded_input_size = self.embedding_size * embed_dim_multiplier
+ self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)
+
+ self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ if self.trigram_input:
+ # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
+ # Devices (https://arxiv.org/abs/2004.02984)
+ #
+ # The embedding table in BERT models accounts for a substantial proportion of model size. To compress
+ # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
+ # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
+ # dimensional output.
+ inputs_embeds = torch.cat(
+ [
+ nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0.0),
+ inputs_embeds,
+ nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0.0),
+ ],
+ dim=2,
+ )
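+ # each position is now the concatenation of its right neighbour, itself and its left neighbour:
+ # (batch, seq_len, embedding_size) -> (batch, seq_len, 3 * embedding_size); the linear
+ # `embedding_transformation` below then projects this to (batch, seq_len, hidden_size)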
+ if self.trigram_input or self.embedding_size != self.hidden_size:
+ inputs_embeds = self.embedding_transformation(inputs_embeds)
+
+ # Add positional embeddings and token type embeddings, then layer
+ # normalize and perform dropout.
+ position_embeddings = self.position_embeddings(position_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class MobileBertSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
+ self.value = nn.Linear(
+ config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size
+ )
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ query_tensor: torch.Tensor,
+ key_tensor: torch.Tensor,
+ value_tensor: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(query_tensor)
+ mixed_key_layer = self.key(key_tensor)
+ mixed_value_layer = self.value(value_tensor)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+ key_layer = self.transpose_for_scores(mixed_key_layer)
+ value_layer = self.transpose_for_scores(mixed_value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
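+ # attention_scores has shape (batch_size, num_attention_heads, seq_len, seq_len)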
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in MobileBertModel forward() function)
+ attention_scores = attention_scores + attention_mask
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+ context_layer = torch.matmul(attention_probs, value_layer)
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+ return outputs
+
+
+class MobileBertSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.use_bottleneck = config.use_bottleneck
+ self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
+ self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
+ if not self.use_bottleneck:
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
+ layer_outputs = self.dense(hidden_states)
+ if not self.use_bottleneck:
+ layer_outputs = self.dropout(layer_outputs)
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
+ return layer_outputs
+
+
+class MobileBertAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = MobileBertSelfAttention(config)
+ self.output = MobileBertSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ query_tensor: torch.Tensor,
+ key_tensor: torch.Tensor,
+ value_tensor: torch.Tensor,
+ layer_input: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ query_tensor,
+ key_tensor,
+ value_tensor,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ )
+ # Run a linear projection of `hidden_size` then add a residual
+ # with `layer_input`.
+ attention_output = self.output(self_outputs[0], layer_input)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class MobileBertIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class OutputBottleneck(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
+ self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
+ layer_outputs = self.dense(hidden_states)
+ layer_outputs = self.dropout(layer_outputs)
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
+ return layer_outputs
+
+
+class MobileBertOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.use_bottleneck = config.use_bottleneck
+ self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
+ self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
+ if not self.use_bottleneck:
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ else:
+ self.bottleneck = OutputBottleneck(config)
+
+ def forward(
+ self, intermediate_states: torch.Tensor, residual_tensor_1: torch.Tensor, residual_tensor_2: torch.Tensor
+ ) -> torch.Tensor:
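+ # When the bottleneck is used, `residual_tensor_1` is the attention output (of width true_hidden_size)
+ # for the inner residual connection, while `residual_tensor_2` is the original layer input (of width
+ # hidden_size) that `OutputBottleneck` adds back after projecting from true_hidden_size to hidden_size.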
+ layer_output = self.dense(intermediate_states)
+ if not self.use_bottleneck:
+ layer_output = self.dropout(layer_output)
+ layer_output = self.LayerNorm(layer_output + residual_tensor_1)
+ else:
+ layer_output = self.LayerNorm(layer_output + residual_tensor_1)
+ layer_output = self.bottleneck(layer_output, residual_tensor_2)
+ return layer_output
+
+
+class BottleneckLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
+ self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ layer_input = self.dense(hidden_states)
+ layer_input = self.LayerNorm(layer_input)
+ return layer_input
+
+
+class Bottleneck(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
+ self.use_bottleneck_attention = config.use_bottleneck_attention
+ self.input = BottleneckLayer(config)
+ if self.key_query_shared_bottleneck:
+ self.attention = BottleneckLayer(config)
+
+ def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
+ # This method can return three different tuples of values. These different values make use of bottlenecks,
+ # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
+ # usage. These linear layers have weights that are learned during training.
+ #
+ # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
+ # key, query, value, and "layer input" to be used by the attention layer.
+ # This bottleneck is used to project the hidden states. This last layer input will be used as a residual tensor
+ # in the attention self output, after the attention scores have been computed.
+ #
+ # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
+ # four values, three of which have been passed through a bottleneck: the query and key, passed through the same
+ # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.
+ #
+ # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,
+ # and the residual layer will be this value passed through a bottleneck.
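+ #
+ # In short, the returned tuple is (query, key, value, layer_input):
+ #   use_bottleneck_attention=True    -> (b, b, b, b)
+ #   key_query_shared_bottleneck=True -> (s, s, hidden_states, b)
+ #   otherwise                        -> (hidden_states, hidden_states, hidden_states, b)
+ # where b = self.input(hidden_states) and s = self.attention(hidden_states).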
+
+ bottlenecked_hidden_states = self.input(hidden_states)
+ if self.use_bottleneck_attention:
+ return (bottlenecked_hidden_states,) * 4
+ elif self.key_query_shared_bottleneck:
+ shared_attention_input = self.attention(hidden_states)
+ return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
+ else:
+ return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
+
+
+class FFNOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
+ self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
+ layer_outputs = self.dense(hidden_states)
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
+ return layer_outputs
+
+
+class FFNLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.intermediate = MobileBertIntermediate(config)
+ self.output = FFNOutput(config)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ intermediate_output = self.intermediate(hidden_states)
+ layer_outputs = self.output(intermediate_output, hidden_states)
+ return layer_outputs
+
+
+class MobileBertLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.use_bottleneck = config.use_bottleneck
+ self.num_feedforward_networks = config.num_feedforward_networks
+
+ self.attention = MobileBertAttention(config)
+ self.intermediate = MobileBertIntermediate(config)
+ self.output = MobileBertOutput(config)
+ if self.use_bottleneck:
+ self.bottleneck = Bottleneck(config)
+ if config.num_feedforward_networks > 1:
+ self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ ) -> Tuple[torch.Tensor]:
+ if self.use_bottleneck:
+ query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
+ else:
+ query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
+
+ self_attention_outputs = self.attention(
+ query_tensor,
+ key_tensor,
+ value_tensor,
+ layer_input,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ s = (attention_output,)
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ if self.num_feedforward_networks != 1:
+ for i, ffn_module in enumerate(self.ffn):
+ attention_output = ffn_module(attention_output)
+ s += (attention_output,)
+
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output, hidden_states)
+ outputs = (
+ (layer_output,)
+ + outputs
+ + (
+ torch.tensor(1000),
+ query_tensor,
+ key_tensor,
+ value_tensor,
+ layer_input,
+ attention_output,
+ intermediate_output,
+ )
+ + s
+ )
+ return outputs
+
+
+class MobileBertEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ head_mask[i],
+ output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class MobileBertPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.do_activate = config.classifier_activation
+ if self.do_activate:
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ if not self.do_activate:
+ return first_token_tensor
+ else:
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = torch.tanh(pooled_output)
+ return pooled_output
+
+
+class MobileBertPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+class MobileBertLMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = MobileBertPredictionHeadTransform(config)
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)
+ self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.transform(hidden_states)
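+ # decoder.weight has shape (vocab_size, embedding_size) and dense.weight has shape
+ # (hidden_size - embedding_size, vocab_size), so concatenating decoder.weight.t() with dense.weight
+ # along dim 0 gives a (hidden_size, vocab_size) projection: the tied word-embedding matrix covers the
+ # first embedding_size hidden dimensions and `dense` provides output weights for the remaining ones.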
+ hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))
+ hidden_states += self.decoder.bias
+ return hidden_states
+
+
+class MobileBertOnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = MobileBertLMPredictionHead(config)
+
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+class MobileBertPreTrainingHeads(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = MobileBertLMPredictionHead(config)
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, sequence_output: torch.Tensor, pooled_output: torch.Tensor) -> Tuple[torch.Tensor]:
+ prediction_scores = self.predictions(sequence_output)
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return prediction_scores, seq_relationship_score
+
+
+class MobileBertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MobileBertConfig
+ pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST
+ load_tf_weights = load_tf_weights_in_mobilebert
+ base_model_prefix = "mobilebert"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, (nn.LayerNorm, NoNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+@dataclass
+class MobileBertForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`MobileBertForPreTraining`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
+ (classification) loss.
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ prediction_logits: torch.FloatTensor = None
+ seq_relationship_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+MOBILEBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MOBILEBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attention tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
+ MOBILEBERT_START_DOCSTRING,
+)
+class MobileBertModel(MobileBertPreTrainedModel):
+ """
+ MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices (https://arxiv.org/pdf/2004.02984.pdf)
+ """
+
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+ self.embeddings = MobileBertEmbeddings(config)
+ self.encoder = MobileBertEncoder(config)
+
+ self.pooler = MobileBertPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
+ `next sentence prediction (classification)` head.
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+class MobileBertForPreTraining(MobileBertPreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.mobilebert = MobileBertModel(config)
+ self.cls = MobileBertPreTrainingHeads(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
+ # resize the dense output embeddings first
+ self.cls.predictions.dense = self._get_resized_lm_head(
+ self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
+ )
+
+ return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
+
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ next_sentence_label: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MobileBertForPreTrainingOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MobileBertForPreTraining
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
+ >>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
+
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
+ >>> # Batch size 1
+ >>> outputs = model(input_ids)
+
+ >>> prediction_logits = outputs.prediction_logits
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output, pooled_output = outputs[:2]
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
+
+ total_loss = None
+ if labels is not None and next_sentence_label is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
+ total_loss = masked_lm_loss + next_sentence_loss
+
+ if not return_dict:
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return MobileBertForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=prediction_scores,
+ seq_relationship_logits=seq_relationship_score,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING)
+class MobileBertForMaskedLM(MobileBertPreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
+ self.cls = MobileBertOnlyMLMHead(config)
+ self.config = config
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
+ # resize the dense output embeddings first
+ self.cls.predictions.dense = self._get_resized_lm_head(
+ self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
+ )
+ return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
+
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'paris'",
+ expected_loss=0.57,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class MobileBertOnlyNSPHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return seq_relationship_score
+
+
+@add_start_docstrings(
+ """MobileBert Model with a `next sentence prediction (classification)` head on top.""",
+ MOBILEBERT_START_DOCSTRING,
+)
+class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.mobilebert = MobileBertModel(config)
+ self.cls = MobileBertOnlyNSPHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple, NextSentencePredictorOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MobileBertForNextSentencePrediction
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
+ >>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
+
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
+ >>> loss = outputs.loss
+ >>> logits = outputs.logits
+ ```"""
+
+ if "next_sentence_label" in kwargs:
+ warnings.warn(
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
+ " `labels` instead.",
+ FutureWarning,
+ )
+ labels = kwargs.pop("next_sentence_label")
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+ seq_relationship_score = self.cls(pooled_output)
+
+ next_sentence_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1))
+
+ if not return_dict:
+ output = (seq_relationship_score,) + outputs[2:]
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
+
+ return NextSentencePredictorOutput(
+ loss=next_sentence_loss,
+ logits=seq_relationship_score,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
+class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.mobilebert = MobileBertModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing
+class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_QA,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ qa_target_start_index=_QA_TARGET_START_INDEX,
+ qa_target_end_index=_QA_TARGET_END_INDEX,
+ expected_output=_QA_EXPECTED_OUTPUT,
+ expected_loss=_QA_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, splitting adds a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice with Bert->MobileBert all-casing
+class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.mobilebert = MobileBertModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(
+ MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+# Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing
+class MobileBertForTokenClassification(MobileBertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_tf_mobilebert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_tf_mobilebert.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ccc996557532b0cc8488d18672e3a3ce8f466c0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/modeling_tf_mobilebert.py
@@ -0,0 +1,1972 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 MobileBERT model."""
+
+
+from __future__ import annotations
+
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPooling,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFNextSentencePredictorOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFNextSentencePredictionLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_mobilebert import MobileBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
+_CONFIG_FOR_DOC = "MobileBertConfig"
+
+# TokenClassification docstring
+_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "vumichien/mobilebert-finetuned-ner"
+_TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']"
+_TOKEN_CLASS_EXPECTED_LOSS = 0.03
+
+# QuestionAnswering docstring
+_CHECKPOINT_FOR_QA = "vumichien/mobilebert-uncased-squad-v2"
+_QA_EXPECTED_OUTPUT = "'a nice puppet'"
+_QA_EXPECTED_LOSS = 3.98
+_QA_TARGET_START_INDEX = 12
+_QA_TARGET_END_INDEX = 13
+
+# SequenceClassification docstring
+_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "vumichien/emo-mobilebert"
+_SEQ_CLASS_EXPECTED_OUTPUT = "'others'"
+_SEQ_CLASS_EXPECTED_LOSS = "4.72"
+
+TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "google/mobilebert-uncased",
+ # See all MobileBERT models at https://huggingface.co/models?filter=mobilebert
+]
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainingLoss
+class TFMobileBertPreTrainingLoss:
+ """
+ Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining
+ NSP + MLM.
+
+ .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
+ """
+
+ def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE)
+
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
+ unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0])
+ # make sure only labels that are not equal to -100
+ # are taken into account for the loss computation
+ lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype)
+ masked_lm_losses = unmasked_lm_losses * lm_loss_mask
+ reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask)
+
+ # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway
+ unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels["next_sentence_label"]), y_pred=logits[1])
+ ns_loss_mask = tf.cast(labels["next_sentence_label"] != -100, dtype=unmasked_ns_loss.dtype)
+ masked_ns_loss = unmasked_ns_loss * ns_loss_mask
+
+ reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask)
+
+ return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,))
+
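+
+# The helper below is a minimal, self-contained sketch of the -100 masking performed by
+# `TFMobileBertPreTrainingLoss.hf_compute_loss` above: positions labelled -100 contribute
+# nothing to the averaged loss. The tensors and the function name are made up purely for
+# illustration; nothing in this module calls it.
+def _sketch_ignore_index_masking():
+    labels = tf.constant([2, -100])  # second position is ignored
+    logits = tf.random.normal((2, 5))  # hypothetical vocabulary of size 5
+    loss_fn = keras.losses.SparseCategoricalCrossentropy(
+        from_logits=True, reduction=keras.losses.Reduction.NONE
+    )
+    # Clip -100 to 0 so the loss call itself never sees an invalid index ...
+    per_token = loss_fn(y_true=tf.nn.relu(labels), y_pred=logits)
+    # ... then zero out those positions and average over the real labels only.
+    mask = tf.cast(labels != -100, dtype=per_token.dtype)
+    return tf.reduce_sum(per_token * mask) / tf.reduce_sum(mask)
+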
+
+class TFMobileBertIntermediate(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(config.intermediate_size, name="dense")
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.true_hidden_size])
+
+
+class TFLayerNorm(keras.layers.LayerNormalization):
+ def __init__(self, feat_size, *args, **kwargs):
+ self.feat_size = feat_size
+ super().__init__(*args, **kwargs)
+
+ def build(self, input_shape=None):
+ super().build([None, None, self.feat_size])
+
+
+class TFNoNorm(keras.layers.Layer):
+ def __init__(self, feat_size, epsilon=None, **kwargs):
+ super().__init__(**kwargs)
+ self.feat_size = feat_size
+
+ def build(self, input_shape):
+ self.bias = self.add_weight("bias", shape=[self.feat_size], initializer="zeros")
+ self.weight = self.add_weight("weight", shape=[self.feat_size], initializer="ones")
+ super().build(input_shape)
+
+ def call(self, inputs: tf.Tensor):
+ return inputs * self.weight + self.bias
+
+
+NORM2FN = {"layer_norm": TFLayerNorm, "no_norm": TFNoNorm}
+
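+
+# A small sketch of how the mapping above is typically used: `config.normalization_type`
+# selects either a true LayerNorm or the cheaper element-wise NoNorm (a learned scale and
+# shift with no mean/variance statistics). The argument defaults below are hypothetical and
+# only illustrate the call pattern; the model itself wires this up inside its layers.
+def _sketch_pick_norm(normalization_type="no_norm", feat_size=512):
+    norm = NORM2FN[normalization_type](feat_size, epsilon=1e-12, name="sketch_norm")
+    x = tf.zeros((1, 4, feat_size))
+    # NoNorm computes x * weight + bias; LayerNorm additionally normalizes over the last axis.
+    return norm(x)
+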
+
+class TFMobileBertEmbeddings(keras.layers.Layer):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.trigram_input = config.trigram_input
+ self.embedding_size = config.embedding_size
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.initializer_range = config.initializer_range
+ self.embedding_transformation = keras.layers.Dense(config.hidden_size, name="embedding_transformation")
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = NORM2FN[config.normalization_type](
+ config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
+ )
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.embedded_input_size = self.embedding_size * (3 if self.trigram_input else 1)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.embedding_size],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ with tf.name_scope("token_type_embeddings"):
+ self.token_type_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.config.type_vocab_size, self.hidden_size],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.hidden_size],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embedding_transformation", None) is not None:
+ with tf.name_scope(self.embedding_transformation.name):
+ self.embedding_transformation.build([None, None, self.embedded_input_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build(None)
+
+ def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ if self.trigram_input:
+ # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
+ # Devices (https://arxiv.org/abs/2004.02984)
+ #
+ # The embedding table in BERT models accounts for a substantial proportion of model size. To compress
+ # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
+ # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
+ # dimensional output.
+ inputs_embeds = tf.concat(
+ [
+ tf.pad(inputs_embeds[:, 1:], ((0, 0), (0, 1), (0, 0))),
+ inputs_embeds,
+ tf.pad(inputs_embeds[:, :-1], ((0, 0), (1, 0), (0, 0))),
+ ],
+ axis=2,
+ )
+
+ if self.trigram_input or self.embedding_size != self.hidden_size:
+ inputs_embeds = self.embedding_transformation(inputs_embeds)
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
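+
+# A compact sketch of the trigram trick described in `TFMobileBertEmbeddings.call` above:
+# each position's embedding is concatenated with its right and left neighbours (zero-padded
+# at the sequence edges), tripling the feature size before the dense projection back to
+# `hidden_size`. The toy shapes are made up; only the padding/concat pattern is the point.
+def _sketch_trigram_concat(batch=2, seq_len=5, emb=128):
+    e = tf.zeros((batch, seq_len, emb))
+    trigram = tf.concat(
+        [
+            tf.pad(e[:, 1:], ((0, 0), (0, 1), (0, 0))),  # next token, zero-padded at the end
+            e,  # current token
+            tf.pad(e[:, :-1], ((0, 0), (1, 0), (0, 0))),  # previous token, zero-padded at the start
+        ],
+        axis=2,
+    )
+    return trigram  # shape (batch, seq_len, 3 * emb)
+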
+
+class TFMobileBertSelfAttention(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads}"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.output_attentions = config.output_attentions
+ assert config.hidden_size % config.num_attention_heads == 0
+ self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
+ )
+ self.key = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
+ )
+ self.value = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
+ )
+
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
+ self.config = config
+
+ def transpose_for_scores(self, x, batch_size):
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
+ return tf.transpose(x, perm=[0, 2, 1, 3])
+
+ def call(
+ self, query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=False
+ ):
+ batch_size = shape_list(attention_mask)[0]
+ mixed_query_layer = self.query(query_tensor)
+ mixed_key_layer = self.key(key_tensor)
+ mixed_value_layer = self.value(value_tensor)
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = tf.matmul(
+ query_layer, key_layer, transpose_b=True
+ ) # (batch size, num_heads, seq_len_q, seq_len_k)
+ dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores
+ attention_scores = attention_scores / tf.math.sqrt(dk)
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the TFMobileBertModel call() function)
+ attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = tf.matmul(attention_probs, value_layer)
+
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
+ context_layer = tf.reshape(
+ context_layer, (batch_size, -1, self.all_head_size)
+ ) # (batch_size, seq_len_q, all_head_size)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.true_hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.config.true_hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build(
+ [
+ None,
+ None,
+ self.config.true_hidden_size
+ if self.config.use_bottleneck_attention
+ else self.config.hidden_size,
+ ]
+ )
+
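+
+# A shape-only sketch of the head split performed by `transpose_for_scores` above:
+# [batch, seq, all_head_size] becomes [batch, num_heads, seq, head_size], so that a single
+# matmul of queries against keys yields one attention-score matrix per head. The sizes are
+# invented for illustration and this helper is not used by the layers in this file.
+def _sketch_split_heads(batch=2, seq=7, heads=4, head_size=32):
+    x = tf.zeros((batch, seq, heads * head_size))
+    x = tf.reshape(x, (batch, -1, heads, head_size))
+    x = tf.transpose(x, perm=[0, 2, 1, 3])
+    scores = tf.matmul(x, x, transpose_b=True) / tf.math.sqrt(float(head_size))
+    return scores  # shape (batch, heads, seq, seq)
+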
+
+class TFMobileBertSelfOutput(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.use_bottleneck = config.use_bottleneck
+ self.dense = keras.layers.Dense(
+ config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = NORM2FN[config.normalization_type](
+ config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
+ )
+ if not self.use_bottleneck:
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states, residual_tensor, training=False):
+ hidden_states = self.dense(hidden_states)
+ if not self.use_bottleneck:
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.true_hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build(None)
+
+
+class TFMobileBertAttention(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.self = TFMobileBertSelfAttention(config, name="self")
+ self.mobilebert_output = TFMobileBertSelfOutput(config, name="output")
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(
+ self,
+ query_tensor,
+ key_tensor,
+ value_tensor,
+ layer_input,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ training=False,
+ ):
+ self_outputs = self.self(
+ query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=training
+ )
+
+ attention_output = self.mobilebert_output(self_outputs[0], layer_input, training=training)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self", None) is not None:
+ with tf.name_scope(self.self.name):
+ self.self.build(None)
+ if getattr(self, "mobilebert_output", None) is not None:
+ with tf.name_scope(self.mobilebert_output.name):
+ self.mobilebert_output.build(None)
+
+
+class TFOutputBottleneck(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(config.hidden_size, name="dense")
+ self.LayerNorm = NORM2FN[config.normalization_type](
+ config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
+ )
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states, residual_tensor, training=False):
+ layer_outputs = self.dense(hidden_states)
+ layer_outputs = self.dropout(layer_outputs, training=training)
+ layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
+ return layer_outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.true_hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build(None)
+
+
+class TFMobileBertOutput(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.use_bottleneck = config.use_bottleneck
+ self.dense = keras.layers.Dense(
+ config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = NORM2FN[config.normalization_type](
+ config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
+ )
+ if not self.use_bottleneck:
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ else:
+ self.bottleneck = TFOutputBottleneck(config, name="bottleneck")
+ self.config = config
+
+ def call(self, hidden_states, residual_tensor_1, residual_tensor_2, training=False):
+ hidden_states = self.dense(hidden_states)
+ if not self.use_bottleneck:
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor_1)
+ else:
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor_1)
+ hidden_states = self.bottleneck(hidden_states, residual_tensor_2)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build(None)
+ if getattr(self, "bottleneck", None) is not None:
+ with tf.name_scope(self.bottleneck.name):
+ self.bottleneck.build(None)
+
+
+class TFBottleneckLayer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(config.intra_bottleneck_size, name="dense")
+ self.LayerNorm = NORM2FN[config.normalization_type](
+ config.intra_bottleneck_size, epsilon=config.layer_norm_eps, name="LayerNorm"
+ )
+ self.config = config
+
+ def call(self, inputs):
+ hidden_states = self.dense(inputs)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build(None)
+
+
+class TFBottleneck(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
+ self.use_bottleneck_attention = config.use_bottleneck_attention
+ self.bottleneck_input = TFBottleneckLayer(config, name="input")
+ if self.key_query_shared_bottleneck:
+ self.attention = TFBottleneckLayer(config, name="attention")
+
+ def call(self, hidden_states):
+ # This method can return three different tuples of values. These different values make use of bottlenecks,
+ # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
+ # usage. These linear layers have weights that are learned during training.
+ #
+ # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
+ # key, query, value, and "layer input" to be used by the attention layer.
+ # This bottleneck is used to project the hidden states. This last layer input will be used as a residual tensor
+ # in the attention self output, after the attention scores have been computed.
+ #
+ # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
+ # four values, three of which have been passed through a bottleneck: the query and key, passed through the same
+ # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.
+ #
+ # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,
+ # and the residual layer will be this value passed through a bottleneck.
+
+ bottlenecked_hidden_states = self.bottleneck_input(hidden_states)
+ if self.use_bottleneck_attention:
+ return (bottlenecked_hidden_states,) * 4
+ elif self.key_query_shared_bottleneck:
+ shared_attention_input = self.attention(hidden_states)
+ return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
+ else:
+ return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "bottleneck_input", None) is not None:
+ with tf.name_scope(self.bottleneck_input.name):
+ self.bottleneck_input.build(None)
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+
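+
+# The three routing cases documented in `TFBottleneck.call` above, restated as a plain
+# function over hypothetical inputs. `bottleneck` and `shared` stand in for the learned
+# `TFBottleneckLayer` projections; only the wiring of (query, key, value, layer_input) is
+# illustrated, mirroring the branches of the real layer.
+def _sketch_bottleneck_routing(hidden_states, bottleneck, shared, use_bottleneck_attention, key_query_shared_bottleneck):
+    b = bottleneck(hidden_states)
+    if use_bottleneck_attention:
+        # query, key, value and the residual ("layer input") all use the bottlenecked states
+        return b, b, b, b
+    if key_query_shared_bottleneck:
+        s = shared(hidden_states)
+        # query and key share one bottleneck, value stays full-size, the residual is bottlenecked
+        return s, s, hidden_states, b
+    # no bottleneck on the attention inputs; only the residual is bottlenecked
+    return hidden_states, hidden_states, hidden_states, b
+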
+
+class TFFFNOutput(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(config.true_hidden_size, name="dense")
+ self.LayerNorm = NORM2FN[config.normalization_type](
+ config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
+ )
+ self.config = config
+
+ def call(self, hidden_states, residual_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + residual_tensor)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build(None)
+
+
+class TFFFNLayer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.intermediate = TFMobileBertIntermediate(config, name="intermediate")
+ self.mobilebert_output = TFFFNOutput(config, name="output")
+
+ def call(self, hidden_states):
+ intermediate_output = self.intermediate(hidden_states)
+ layer_outputs = self.mobilebert_output(intermediate_output, hidden_states)
+ return layer_outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "mobilebert_output", None) is not None:
+ with tf.name_scope(self.mobilebert_output.name):
+ self.mobilebert_output.build(None)
+
+
+class TFMobileBertLayer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.use_bottleneck = config.use_bottleneck
+ self.num_feedforward_networks = config.num_feedforward_networks
+ self.attention = TFMobileBertAttention(config, name="attention")
+ self.intermediate = TFMobileBertIntermediate(config, name="intermediate")
+ self.mobilebert_output = TFMobileBertOutput(config, name="output")
+
+ if self.use_bottleneck:
+ self.bottleneck = TFBottleneck(config, name="bottleneck")
+ if config.num_feedforward_networks > 1:
+ self.ffn = [TFFFNLayer(config, name=f"ffn.{i}") for i in range(config.num_feedforward_networks - 1)]
+
+ def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
+ if self.use_bottleneck:
+ query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
+ else:
+ query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
+
+ attention_outputs = self.attention(
+ query_tensor,
+ key_tensor,
+ value_tensor,
+ layer_input,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ training=training,
+ )
+
+ attention_output = attention_outputs[0]
+ s = (attention_output,)
+
+ if self.num_feedforward_networks != 1:
+ for i, ffn_module in enumerate(self.ffn):
+ attention_output = ffn_module(attention_output)
+ s += (attention_output,)
+
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.mobilebert_output(intermediate_output, attention_output, hidden_states, training=training)
+
+ outputs = (
+ (layer_output,)
+ + attention_outputs[1:]
+ + (
+ tf.constant(0),
+ query_tensor,
+ key_tensor,
+ value_tensor,
+ layer_input,
+ attention_output,
+ intermediate_output,
+ )
+ + s
+ ) # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "mobilebert_output", None) is not None:
+ with tf.name_scope(self.mobilebert_output.name):
+ self.mobilebert_output.build(None)
+ if getattr(self, "bottleneck", None) is not None:
+ with tf.name_scope(self.bottleneck.name):
+ self.bottleneck.build(None)
+ if getattr(self, "ffn", None) is not None:
+ for layer in self.ffn:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFMobileBertEncoder(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.layer = [TFMobileBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=False,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states, attention_mask, head_mask[i], output_attentions, training=training
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFMobileBertPooler(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.do_activate = config.classifier_activation
+ if self.do_activate:
+ self.dense = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ if not self.do_activate:
+ return first_token_tensor
+ else:
+ pooled_output = self.dense(first_token_tensor)
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFMobileBertPredictionHeadTransform(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.config = config
+
+ def call(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build(None)
+
+
+class TFMobileBertLMPredictionHead(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.transform = TFMobileBertPredictionHeadTransform(config, name="transform")
+ self.config = config
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+ self.dense = self.add_weight(
+ shape=(self.config.hidden_size - self.config.embedding_size, self.config.vocab_size),
+ initializer="zeros",
+ trainable=True,
+ name="dense/weight",
+ )
+ self.decoder = self.add_weight(
+ shape=(self.config.vocab_size, self.config.embedding_size),
+ initializer="zeros",
+ trainable=True,
+ name="decoder/weight",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transform", None) is not None:
+ with tf.name_scope(self.transform.name):
+ self.transform.build(None)
+
+ def get_output_embeddings(self):
+ return self
+
+ def set_output_embeddings(self, value):
+ self.decoder = value
+ self.config.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
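+ # The output projection is assembled from two pieces: tf.transpose(self.decoder) has shape
+ # (embedding_size, vocab_size) and reuses the compact word-embedding table for the logits,
+ # while self.dense has shape (hidden_size - embedding_size, vocab_size) and covers the
+ # remaining hidden dimensions. Concatenated along axis 0 they form a (hidden_size, vocab_size) matrix.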
+ hidden_states = tf.matmul(hidden_states, tf.concat([tf.transpose(self.decoder), self.dense], axis=0))
+ hidden_states = hidden_states + self.bias
+ return hidden_states
+
+
+class TFMobileBertMLMHead(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.predictions = TFMobileBertLMPredictionHead(config, name="predictions")
+
+ def call(self, sequence_output):
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+
+
+@keras_serializable
+class TFMobileBertMainLayer(keras.layers.Layer):
+ config_class = MobileBertConfig
+
+ def __init__(self, config, add_pooling_layer=True, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.num_hidden_layers = config.num_hidden_layers
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+
+ self.embeddings = TFMobileBertEmbeddings(config, name="embeddings")
+ self.encoder = TFMobileBertEncoder(config, name="encoder")
+ self.pooler = TFMobileBertPooler(config, name="pooler") if add_pooling_layer else None
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class PreTrainedModel.
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ token_type_ids=None,
+ position_ids=None,
+ head_mask=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.fill(input_shape, 1)
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(input_shape, 0)
+
+ embedding_output = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is simpler than the triangular masking of causal attention
+ # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.num_hidden_layers
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ extended_attention_mask,
+ head_mask,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (
+ sequence_output,
+ pooled_output,
+ ) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+
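+
+# A minimal sketch of the additive attention mask built in `TFMobileBertMainLayer.call` above:
+# a 2D padding mask (1 = attend, 0 = padding) is reshaped to a broadcastable 4D tensor that is
+# 0.0 where attention is allowed and -10000.0 where it is masked, so adding it to the raw
+# scores before the softmax effectively removes the padded positions. The mask is made up.
+def _sketch_extended_attention_mask():
+    attention_mask = tf.constant([[1.0, 1.0, 1.0, 0.0]])  # batch 1, seq 4, last token is padding
+    extended = tf.reshape(attention_mask, (1, 1, 1, 4))
+    extended = (1.0 - extended) * -10000.0
+    return extended  # [[[[0., 0., 0., -10000.]]]]
+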
+
+class TFMobileBertPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MobileBertConfig
+ base_model_prefix = "mobilebert"
+
+
+@dataclass
+class TFMobileBertForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`TFMobileBertForPreTraining`].
+
+ Args:
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ seq_relationship_logits (`tf.Tensor` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ prediction_logits: tf.Tensor = None
+ seq_relationship_logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+MOBILEBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
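+
+# A short sketch of the three input formats described in MOBILEBERT_START_DOCSTRING above,
+# using TFMobileBertModel (defined further down in this file) as the example. The checkpoint
+# name is the documented "google/mobilebert-uncased"; the token ids are placeholders, and this
+# helper is never called by the library code.
+def _sketch_input_formats():
+    model = TFMobileBertModel.from_pretrained("google/mobilebert-uncased")
+    input_ids = tf.constant([[101, 7592, 102]])
+    attention_mask = tf.constant([[1, 1, 1]])
+    out_kwargs = model(input_ids=input_ids, attention_mask=attention_mask)  # keyword arguments
+    out_list = model([input_ids, attention_mask])  # list in the first positional argument
+    out_dict = model({"input_ids": input_ids, "attention_mask": attention_mask})  # dict in the first positional argument
+    return out_kwargs, out_list, out_dict
+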
+MOBILEBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
+ MOBILEBERT_START_DOCSTRING,
+)
+class TFMobileBertModel(TFMobileBertPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
+ outputs = self.mobilebert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilebert", None) is not None:
+ with tf.name_scope(self.mobilebert.name):
+ self.mobilebert.build(None)
+
+
+@add_start_docstrings(
+ """
+ MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
+ `next sentence prediction (classification)` head.
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+class TFMobileBertForPreTraining(TFMobileBertPreTrainedModel, TFMobileBertPreTrainingLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
+ self.predictions = TFMobileBertMLMHead(config, name="predictions___cls")
+ self.seq_relationship = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls")
+
+ def get_lm_head(self):
+ return self.predictions.predictions
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.predictions.name + "/" + self.predictions.predictions.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFMobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFMobileBertForPreTrainingOutput]:
+ r"""
+ Return:
+
+ Examples:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import AutoTokenizer, TFMobileBertForPreTraining
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
+ >>> model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
+ >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+ >>> outputs = model(input_ids)
+ >>> prediction_scores, seq_relationship_scores = outputs[:2]
+ ```"""
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output, pooled_output = outputs[:2]
+ prediction_scores = self.predictions(sequence_output)
+ seq_relationship_score = self.seq_relationship(pooled_output)
+
+ total_loss = None
+ if labels is not None and next_sentence_label is not None:
+ d_labels = {"labels": labels}
+ d_labels["next_sentence_label"] = next_sentence_label
+ total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))
+
+ if not return_dict:
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return TFMobileBertForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=prediction_scores,
+ seq_relationship_logits=seq_relationship_score,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilebert", None) is not None:
+ with tf.name_scope(self.mobilebert.name):
+ self.mobilebert.build(None)
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+ if getattr(self, "seq_relationship", None) is not None:
+ with tf.name_scope(self.seq_relationship.name):
+ self.seq_relationship.build(None)
+
+ def tf_to_pt_weight_rename(self, tf_weight):
+ if tf_weight == "cls.predictions.decoder.weight":
+ return tf_weight, "mobilebert.embeddings.word_embeddings.weight"
+ else:
+ return (tf_weight,)
+
+
+@add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING)
+class TFMobileBertForMaskedLM(TFMobileBertPreTrainedModel, TFMaskedLanguageModelingLoss):
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [
+ r"pooler",
+ r"seq_relationship___cls",
+ r"cls.seq_relationship",
+ ]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
+ self.predictions = TFMobileBertMLMHead(config, name="predictions___cls")
+
+ def get_lm_head(self):
+ return self.predictions.predictions
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'paris'",
+ expected_loss=0.57,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFMaskedLMOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ """
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.predictions(sequence_output, training=training)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilebert", None) is not None:
+ with tf.name_scope(self.mobilebert.name):
+ self.mobilebert.build(None)
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+
+ def tf_to_pt_weight_rename(self, tf_weight):
+ if tf_weight == "cls.predictions.decoder.weight":
+ return tf_weight, "mobilebert.embeddings.word_embeddings.weight"
+ else:
+ return (tf_weight,)
+
+
+class TFMobileBertOnlyNSPHead(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.seq_relationship = keras.layers.Dense(2, name="seq_relationship")
+ self.config = config
+
+ def call(self, pooled_output):
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return seq_relationship_score
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "seq_relationship", None) is not None:
+ with tf.name_scope(self.seq_relationship.name):
+ self.seq_relationship.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """MobileBert Model with a `next sentence prediction (classification)` head on top.""",
+ MOBILEBERT_START_DOCSTRING,
+)
+class TFMobileBertForNextSentencePrediction(TFMobileBertPreTrainedModel, TFNextSentencePredictionLoss):
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"predictions___cls", r"cls.predictions"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
+ self.cls = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ next_sentence_label: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFNextSentencePredictorOutput]:
+ r"""
+ Return:
+
+ Examples:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import AutoTokenizer, TFMobileBertForNextSentencePrediction
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
+ >>> model = TFMobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="tf")
+
+ >>> logits = model(encoding["input_ids"], token_type_ids=encoding["token_type_ids"])[0]
+ ```"""
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+ seq_relationship_scores = self.cls(pooled_output)
+
+ next_sentence_loss = (
+ None
+ if next_sentence_label is None
+ else self.hf_compute_loss(labels=next_sentence_label, logits=seq_relationship_scores)
+ )
+
+ if not return_dict:
+ output = (seq_relationship_scores,) + outputs[2:]
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
+
+ return TFNextSentencePredictorOutput(
+ loss=next_sentence_loss,
+ logits=seq_relationship_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilebert", None) is not None:
+ with tf.name_scope(self.mobilebert.name):
+ self.mobilebert.build(None)
+ if getattr(self, "cls", None) is not None:
+ with tf.name_scope(self.cls.name):
+ self.cls.build(None)
+
+
+@add_start_docstrings(
+ """
+ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+class TFMobileBertForSequenceClassification(TFMobileBertPreTrainedModel, TFSequenceClassificationLoss):
+ # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [
+ r"predictions___cls",
+ r"seq_relationship___cls",
+ r"cls.predictions",
+ r"cls.seq_relationship",
+ ]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = keras.layers.Dropout(classifier_dropout)
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFSequenceClassifierOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output, training=training)
+ logits = self.classifier(pooled_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilebert", None) is not None:
+ with tf.name_scope(self.mobilebert.name):
+ self.mobilebert.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
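+# A minimal usage sketch (editor's illustration, not part of the library): it assumes the public
+# "google/mobilebert-uncased" checkpoint and shows how the sequence classification head above is typically
+# driven; with `num_labels == 1` the same head computes a regression (MSE) loss instead.
+#
+#   import tensorflow as tf
+#   from transformers import AutoTokenizer, TFMobileBertForSequenceClassification
+#
+#   tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
+#   model = TFMobileBertForSequenceClassification.from_pretrained("google/mobilebert-uncased", num_labels=2)
+#   inputs = tokenizer("MobileBERT runs well on mobile devices.", return_tensors="tf")
+#   outputs = model(**inputs, labels=tf.constant([1]))  # TFSequenceClassifierOutput
+#   print(float(outputs.loss), tf.nn.softmax(outputs.logits, axis=-1).numpy())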
+
+@add_start_docstrings(
+ """
+    MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+    linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+class TFMobileBertForQuestionAnswering(TFMobileBertPreTrainedModel, TFQuestionAnsweringLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [
+ r"pooler",
+ r"predictions___cls",
+ r"seq_relationship___cls",
+ r"cls.predictions",
+ r"cls.seq_relationship",
+ ]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
+ self.qa_outputs = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_QA,
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ qa_target_start_index=_QA_TARGET_START_INDEX,
+ qa_target_end_index=_QA_TARGET_END_INDEX,
+ expected_output=_QA_EXPECTED_OUTPUT,
+ expected_loss=_QA_EXPECTED_LOSS,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFQuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ """
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions, "end_position": end_positions}
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilebert", None) is not None:
+ with tf.name_scope(self.mobilebert.name):
+ self.mobilebert.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
+
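+# A minimal usage sketch (editor's illustration, not part of the library): the QA head above emits one start
+# and one end logit per token, and an answer span can be read off with an argmax over each. The checkpoint
+# name is an assumption.
+#
+#   import tensorflow as tf
+#   from transformers import AutoTokenizer, TFMobileBertForQuestionAnswering
+#
+#   tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
+#   model = TFMobileBertForQuestionAnswering.from_pretrained("google/mobilebert-uncased")
+#   inputs = tokenizer("What is MobileBERT?", "MobileBERT is a compact BERT variant.", return_tensors="tf")
+#   outputs = model(**inputs)
+#   start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
+#   end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
+#   answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])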
+
+@add_start_docstrings(
+ """
+ MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+class TFMobileBertForMultipleChoice(TFMobileBertPreTrainedModel, TFMultipleChoiceLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [
+ r"predictions___cls",
+ r"seq_relationship___cls",
+ r"cls.predictions",
+ r"cls.seq_relationship",
+ ]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(
+ MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFMultipleChoiceModelOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
+ """
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+ outputs = self.mobilebert(
+ flat_input_ids,
+ flat_attention_mask,
+ flat_token_type_ids,
+ flat_position_ids,
+ head_mask,
+ flat_inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(pooled_output, training=training)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilebert", None) is not None:
+ with tf.name_scope(self.mobilebert.name):
+ self.mobilebert.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
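+# A minimal usage sketch (editor's illustration, not part of the library): multiple-choice inputs are shaped
+# (batch_size, num_choices, sequence_length); the head above flattens them, scores each choice with a single
+# linear unit, and reshapes the logits back to (batch_size, num_choices). The checkpoint name is an assumption.
+#
+#   import tensorflow as tf
+#   from transformers import AutoTokenizer, TFMobileBertForMultipleChoice
+#
+#   tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
+#   model = TFMobileBertForMultipleChoice.from_pretrained("google/mobilebert-uncased")
+#   prompt = "The cat sat on the"
+#   choices = ["mat.", "spaceship."]
+#   enc = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)
+#   inputs = {k: tf.expand_dims(v, 0) for k, v in enc.items()}  # add the num_choices axis
+#   logits = model(**inputs).logits  # shape (1, 2)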
+
+@add_start_docstrings(
+ """
+ MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ MOBILEBERT_START_DOCSTRING,
+)
+class TFMobileBertForTokenClassification(TFMobileBertPreTrainedModel, TFTokenClassificationLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [
+ r"pooler",
+ r"predictions___cls",
+ r"seq_relationship___cls",
+ r"cls.predictions",
+ r"cls.seq_relationship",
+ ]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = keras.layers.Dropout(classifier_dropout)
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFTokenClassifierOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ outputs = self.mobilebert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mobilebert", None) is not None:
+ with tf.name_scope(self.mobilebert.name):
+ self.mobilebert.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
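+# A minimal usage sketch (editor's illustration, not part of the library): the token classification head
+# above returns one logit vector per input token, e.g. for NER tagging. The checkpoint name is an assumption,
+# and its freshly initialized head would still need fine-tuning on labelled data before the predictions are
+# meaningful.
+#
+#   import tensorflow as tf
+#   from transformers import AutoTokenizer, TFMobileBertForTokenClassification
+#
+#   tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
+#   model = TFMobileBertForTokenClassification.from_pretrained("google/mobilebert-uncased", num_labels=9)
+#   inputs = tokenizer("HuggingFace is based in New York City.", return_tensors="tf")
+#   logits = model(**inputs).logits  # shape (1, sequence_length, 9)
+#   predicted_ids = tf.argmax(logits, axis=-1)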
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert.py
new file mode 100644
index 0000000000000000000000000000000000000000..f27873e92fcfa9f49ab76aeebf1f8c913f594b23
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert.py
@@ -0,0 +1,518 @@
+# coding=utf-8
+#
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for MobileBERT."""
+
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"}
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
+
+
+PRETRAINED_INIT_CONFIGURATION = {}
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+# Copied from transformers.models.bert.tokenization_bert.BertTokenizer with BERT->MobileBERT,Bert->MobileBert
+class MobileBertTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a MobileBERT tokenizer. Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original MobileBERT).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = MobileBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ def vocab_size(self):
+ return len(self.vocab)
+
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ def _tokenize(self, text, split_special_tokens=False):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
+ ):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A MobileBERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
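+    # Worked example (editor's illustration), assuming the standard uncased BERT-style vocabulary where
+    # cls_token_id == 101 and sep_token_id == 102:
+    #
+    #   tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
+    #   tokenizer.build_inputs_with_special_tokens([7, 8])       # -> [101, 7, 8, 102]
+    #   tokenizer.build_inputs_with_special_tokens([7, 8], [9])  # -> [101, 7, 8, 102, 9, 102]
+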
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
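+    # Worked example (editor's illustration), continuing the ids used above: the token type ids mark which
+    # segment each position (special tokens included) belongs to.
+    #
+    #   tokenizer.create_token_type_ids_from_sequences([7, 8])       # -> [0, 0, 0, 0]
+    #   tokenizer.create_token_type_ids_from_sequences([7, 8], [9])  # -> [0, 0, 0, 0, 1, 1]
+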
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+        Basic Tokenization of a piece of text. For sub-word tokenization, see WordpieceTokenizer.
+
+ Args:
+            never_split (`List[str]`, *optional*):
+                List of tokens not to split. Kept for backward compatibility purposes; now implemented directly at the
+                base class level (see [`PreTrainedTokenizer.tokenize`]).
+ """
+        # union() returns a new set containing the elements of both sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
+        # space-separated words, so they are not treated specially and are handled
+        # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
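+# A minimal sketch (editor's illustration) of what the BasicTokenizer above does on its own, before any
+# WordPiece step: cleaning, lower-casing, accent stripping and punctuation splitting.
+#
+#   basic = BasicTokenizer(do_lower_case=True)
+#   basic.tokenize("Héllo, MobileBERT!")  # -> ['hello', ',', 'mobilebert', '!']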
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+        For example, `input = "unaffable"` will be tokenized into `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
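+# A minimal sketch (editor's illustration) of the greedy longest-match-first algorithm above, using a toy
+# vocabulary; the real tokenizer loads its vocabulary from vocab.txt instead.
+#
+#   vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
+#   wp = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
+#   wp.tokenize("unaffable")     # -> ['un', '##aff', '##able']
+#   wp.tokenize("unbelievable")  # -> ['[UNK]'] (no full decomposition exists in this toy vocab)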
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert_fast.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b137d2ed60a3547d11889453252f2347f823c22
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/mobilebert/tokenization_mobilebert_fast.py
@@ -0,0 +1,189 @@
+# coding=utf-8
+#
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for MobileBERT."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_mobilebert import MobileBertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
+ "tokenizer_file": {
+ "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
+
+
+PRETRAINED_INIT_CONFIGURATION = {}
+
+
+# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with BERT->MobileBERT,Bert->MobileBert
+class MobileBertTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
+            whitespace with the classic space character.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original MobileBERT).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ slow_tokenizer_class = MobileBertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A MobileBERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A MobileBERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
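+# A minimal usage sketch (editor's illustration, not part of the library): the fast tokenizer is a drop-in
+# replacement for the slow MobileBertTokenizer and is what AutoTokenizer typically returns when the
+# `tokenizers` library is installed. The checkpoint name is an assumption.
+#
+#   from transformers import MobileBertTokenizerFast
+#
+#   tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
+#   enc = tokenizer("Hello world!", "Second segment.")
+#   enc["input_ids"], enc["token_type_ids"], enc["attention_mask"]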
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..997f88234fc2c8341497c7a48a74b5526769aab5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__init__.py
@@ -0,0 +1,96 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tokenizers_available,
+ is_torch_available,
+ is_vision_available,
+)
+
+
+_import_structure = {
+ "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
+ "tokenization_perceiver": ["PerceiverTokenizer"],
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
+ _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_perceiver"] = [
+ "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "PerceiverForImageClassificationConvProcessing",
+ "PerceiverForImageClassificationFourier",
+ "PerceiverForImageClassificationLearned",
+ "PerceiverForMaskedLM",
+ "PerceiverForMultimodalAutoencoding",
+ "PerceiverForOpticalFlow",
+ "PerceiverForSequenceClassification",
+ "PerceiverLayer",
+ "PerceiverModel",
+ "PerceiverPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
+ from .tokenization_perceiver import PerceiverTokenizer
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_perceiver import PerceiverFeatureExtractor
+ from .image_processing_perceiver import PerceiverImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_perceiver import (
+ PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ PerceiverForImageClassificationConvProcessing,
+ PerceiverForImageClassificationFourier,
+ PerceiverForImageClassificationLearned,
+ PerceiverForMaskedLM,
+ PerceiverForMultimodalAutoencoding,
+ PerceiverForOpticalFlow,
+ PerceiverForSequenceClassification,
+ PerceiverLayer,
+ PerceiverModel,
+ PerceiverPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4500f38ac956acfd4e3852ad76b0061fb2df818b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/configuration_perceiver.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/configuration_perceiver.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13e143e6a95bd4d59b371519eec3e8adc2ad5375
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/configuration_perceiver.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/convert_perceiver_haiku_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/convert_perceiver_haiku_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5da66e09c63b0ba40a6ddc7c5088987fe641901
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/convert_perceiver_haiku_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/feature_extraction_perceiver.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/feature_extraction_perceiver.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6441c177f368ec3ac7eb36e2900dfd00282bb3d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/feature_extraction_perceiver.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/image_processing_perceiver.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/image_processing_perceiver.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ee432024cf1ea9141bf32568afab94970c10b86
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/image_processing_perceiver.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..697be4063807489a63617da95b548b369cbfc375
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/tokenization_perceiver.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/tokenization_perceiver.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5402357140a9fc6952e061dbc25f753fc6b27eb9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/tokenization_perceiver.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/configuration_perceiver.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/configuration_perceiver.py
new file mode 100644
index 0000000000000000000000000000000000000000..d741b287e5db7c3c5667b982ce7a76be2dc39e43
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/configuration_perceiver.py
@@ -0,0 +1,246 @@
+# coding=utf-8
+# Copyright Deepmind and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Perceiver model configuration"""
+
+from collections import OrderedDict
+from typing import Any, Mapping, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...feature_extraction_utils import FeatureExtractionMixin
+from ...onnx import OnnxConfig
+from ...onnx.utils import compute_effective_axis_dimension
+from ...tokenization_utils_base import PreTrainedTokenizerBase
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
+ # See all Perceiver models at https://huggingface.co/models?filter=perceiver
+}
+
+
+class PerceiverConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`PerceiverModel`]. It is used to instantiate a
+ Perceiver model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Perceiver
+ [deepmind/language-perceiver](https://huggingface.co/deepmind/language-perceiver) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ num_latents (`int`, *optional*, defaults to 256):
+ The number of latents.
+ d_latents (`int`, *optional*, defaults to 1280):
+ Dimension of the latent embeddings.
+ d_model (`int`, *optional*, defaults to 768):
+ Dimension of the inputs. Should only be provided in case [*PerceiverTextPreprocessor*] is used or no
+ preprocessor is provided.
+ num_blocks (`int`, *optional*, defaults to 1):
+ Number of blocks in the Transformer encoder.
+ num_self_attends_per_block (`int`, *optional*, defaults to 26):
+ The number of self-attention layers per block.
+ num_self_attention_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each self-attention layer in the Transformer encoder.
+ num_cross_attention_heads (`int`, *optional*, defaults to 8):
+ Number of attention heads for each cross-attention layer in the Transformer encoder.
+ qk_channels (`int`, *optional*):
+ Dimension to project the queries + keys before applying attention in the cross-attention and self-attention
+ layers of the encoder. Will default to preserving the dimension of the queries if not specified.
+ v_channels (`int`, *optional*):
+ Dimension to project the values before applying attention in the cross-attention and self-attention layers
+ of the encoder. Will default to preserving the dimension of the queries if not specified.
+ cross_attention_shape_for_attention (`str`, *optional*, defaults to `"kv"`):
+ Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
+        self_attention_widening_factor (`int`, *optional*, defaults to 1):
+            Widening factor of the feed-forward layer in the self-attention layers of the Transformer encoder.
+        cross_attention_widening_factor (`int`, *optional*, defaults to 1):
+            Widening factor of the feed-forward layer in the cross-attention layer of the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+        use_query_residual (`bool`, *optional*, defaults to `True`):
+ Whether to add a query residual in the cross-attention layer of the encoder.
+ vocab_size (`int`, *optional*, defaults to 262):
+ Vocabulary size for the masked language modeling model.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that the masked language modeling model might ever be used with. Typically set
+ this to something large just in case (e.g., 512 or 1024 or 2048).
+ image_size (`int`, *optional*, defaults to 56):
+ Size of the images after preprocessing, for [`PerceiverForImageClassificationLearned`].
+ train_size (`List[int]`, *optional*, defaults to `[368, 496]`):
+ Training size of the images for the optical flow model.
+ num_frames (`int`, *optional*, defaults to 16):
+ Number of video frames used for the multimodal autoencoding model.
+ audio_samples_per_frame (`int`, *optional*, defaults to 1920):
+ Number of audio samples per frame for the multimodal autoencoding model.
+ samples_per_patch (`int`, *optional*, defaults to 16):
+ Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
+ output_shape (`List[int]`, *optional*, defaults to `[1, 16, 224, 224]`):
+ Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the multimodal
+ autoencoding model. This excludes the channel dimension.
+ output_num_channels (`int`, *optional*, defaults to 512):
+            Number of output channels for each modality decoder.
+
+ Example:
+
+ ```python
+ >>> from transformers import PerceiverModel, PerceiverConfig
+
+ >>> # Initializing a Perceiver deepmind/language-perceiver style configuration
+ >>> configuration = PerceiverConfig()
+
+ >>> # Initializing a model from the deepmind/language-perceiver style configuration
+ >>> model = PerceiverModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "perceiver"
+
+ def __init__(
+ self,
+ num_latents=256,
+ d_latents=1280,
+ d_model=768,
+ num_blocks=1,
+ num_self_attends_per_block=26,
+ num_self_attention_heads=8,
+ num_cross_attention_heads=8,
+ qk_channels=None,
+ v_channels=None,
+ cross_attention_shape_for_attention="kv",
+ self_attention_widening_factor=1,
+ cross_attention_widening_factor=1,
+ hidden_act="gelu",
+ attention_probs_dropout_prob=0.1,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ use_query_residual=True,
+ vocab_size=262,
+ max_position_embeddings=2048,
+ image_size=56,
+ train_size=[368, 496],
+ num_frames=16,
+ audio_samples_per_frame=1920,
+ samples_per_patch=16,
+ output_shape=[1, 16, 224, 224],
+ output_num_channels=512,
+ _label_trainable_num_channels=1024,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.num_latents = num_latents
+ self.d_latents = d_latents
+ self.d_model = d_model
+ self.num_blocks = num_blocks
+ self.num_self_attends_per_block = num_self_attends_per_block
+ self.num_self_attention_heads = num_self_attention_heads
+ self.num_cross_attention_heads = num_cross_attention_heads
+ self.qk_channels = qk_channels
+ self.v_channels = v_channels
+ self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
+ self.self_attention_widening_factor = self_attention_widening_factor
+ self.cross_attention_widening_factor = cross_attention_widening_factor
+ self.hidden_act = hidden_act
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.use_query_residual = use_query_residual
+ # masked language modeling attributes
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ # image classification attributes
+ self.image_size = image_size
+ # flow attributes
+ self.train_size = train_size
+ # multimodal autoencoding attributes
+ self.num_frames = num_frames
+ self.audio_samples_per_frame = audio_samples_per_frame
+ self.samples_per_patch = samples_per_patch
+ self.output_shape = output_shape
+ self.output_num_channels = output_num_channels
+ self._label_trainable_num_channels = _label_trainable_num_channels
+
+
+class PerceiverOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("inputs", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
+
+ def generate_dummy_inputs(
+ self,
+ preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
+ batch_size: int = -1,
+ seq_length: int = -1,
+ num_choices: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ num_channels: int = 3,
+ image_width: int = 40,
+ image_height: int = 40,
+ ) -> Mapping[str, Any]:
+ # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
+
+ if isinstance(preprocessor, PreTrainedTokenizerBase):
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
+ batch_size = compute_effective_axis_dimension(
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
+ )
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
+ token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
+ seq_length = compute_effective_axis_dimension(
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
+ )
+ # Generate dummy inputs according to compute batch and sequence
+ dummy_input = [" ".join(["a"]) * seq_length] * batch_size
+ inputs = dict(preprocessor(dummy_input, return_tensors=framework))
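+ # the Perceiver forward signature uses a generic "inputs" argument, so rename the tokenizer's "input_ids"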
+ inputs["inputs"] = inputs.pop("input_ids")
+ return inputs
+ elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
+ batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
+ dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
+ inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
+ inputs["inputs"] = inputs.pop("pixel_values")
+ return inputs
+ else:
+ raise ValueError(
+ "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
+ )
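+
+
+ # Illustrative usage sketch (not part of the module's public surface; the checkpoint name is taken from the
+ # docstring example above): the ONNX config can build dummy inputs from a tokenizer before export, e.g.
+ #
+ #     from transformers import PerceiverConfig, PerceiverTokenizer
+ #     from transformers.utils import TensorType
+ #
+ #     onnx_config = PerceiverOnnxConfig(PerceiverConfig())
+ #     tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
+ #     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
+ #     # -> {"inputs": ..., "attention_mask": ...} with fixed batch and sequence dimensions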
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ea97981275227a6a9dcc6dd984562fa8dbf31e5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py
@@ -0,0 +1,468 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Perceiver checkpoints originally implemented in Haiku."""
+
+
+import argparse
+import json
+import pickle
+from pathlib import Path
+
+import haiku as hk
+import numpy as np
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import (
+ PerceiverConfig,
+ PerceiverForImageClassificationConvProcessing,
+ PerceiverForImageClassificationFourier,
+ PerceiverForImageClassificationLearned,
+ PerceiverForMaskedLM,
+ PerceiverForMultimodalAutoencoding,
+ PerceiverForOpticalFlow,
+ PerceiverImageProcessor,
+ PerceiverTokenizer,
+)
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def prepare_img():
+ # We will verify our results on an image of a dog
+ url = "https://storage.googleapis.com/perceiver_io/dalmation.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+def rename_keys(state_dict, architecture):
+ for name in list(state_dict):
+ param = state_dict.pop(name)
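+ # each original Haiku scope name is popped, rewritten to the HuggingFace naming scheme via the
+ # replacements below, and re-inserted at the end of this loop body as a torch tensor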
+
+ # PREPROCESSORS
+ # rename text preprocessor embeddings (for MLM model)
+ name = name.replace("embed/embeddings", "input_preprocessor.embeddings.weight")
+ if name.startswith("trainable_position_encoding/pos_embs"):
+ name = name.replace(
+ "trainable_position_encoding/pos_embs", "input_preprocessor.position_embeddings.weight"
+ )
+
+ # rename image preprocessor embeddings (for image classification model with learned position embeddings)
+ name = name.replace("image_preprocessor/~/conv2_d/w", "input_preprocessor.convnet_1x1.weight")
+ name = name.replace("image_preprocessor/~/conv2_d/b", "input_preprocessor.convnet_1x1.bias")
+ name = name.replace(
+ "image_preprocessor/~_build_network_inputs/trainable_position_encoding/pos_embs",
+ "input_preprocessor.position_embeddings.position_embeddings",
+ )
+ name = name.replace(
+ "image_preprocessor/~_build_network_inputs/position_encoding_projector/linear/w",
+ "input_preprocessor.positions_projection.weight",
+ )
+ name = name.replace(
+ "image_preprocessor/~_build_network_inputs/position_encoding_projector/linear/b",
+ "input_preprocessor.positions_projection.bias",
+ )
+
+ # rename image preprocessor embeddings (for image classification model with conv processing)
+ if "counter" in name or "hidden" in name:
+ continue
+ name = name.replace(
+ "image_preprocessor/~/conv2_d_downsample/~/conv/w", "input_preprocessor.convnet.conv.weight"
+ )
+ name = name.replace(
+ "image_preprocessor/~/conv2_d_downsample/~/batchnorm/offset", "input_preprocessor.convnet.batchnorm.bias"
+ )
+ name = name.replace(
+ "image_preprocessor/~/conv2_d_downsample/~/batchnorm/scale", "input_preprocessor.convnet.batchnorm.weight"
+ )
+ name = name.replace(
+ "image_preprocessor/~/conv2_d_downsample/~/batchnorm/~/mean_ema/average",
+ "input_preprocessor.convnet.batchnorm.running_mean",
+ )
+ name = name.replace(
+ "image_preprocessor/~/conv2_d_downsample/~/batchnorm/~/var_ema/average",
+ "input_preprocessor.convnet.batchnorm.running_var",
+ )
+
+ # rename image preprocessor embeddings (for optical flow model)
+ name = name.replace("image_preprocessor/patches_linear/b", "input_preprocessor.conv_after_patches.bias")
+ name = name.replace("image_preprocessor/patches_linear/w", "input_preprocessor.conv_after_patches.weight")
+
+ # rename multimodal preprocessor embeddings
+ name = name.replace("multimodal_preprocessor/audio_mask_token/pos_embs", "input_preprocessor.mask.audio")
+ name = name.replace("multimodal_preprocessor/audio_padding/pos_embs", "input_preprocessor.padding.audio")
+ name = name.replace("multimodal_preprocessor/image_mask_token/pos_embs", "input_preprocessor.mask.image")
+ name = name.replace("multimodal_preprocessor/image_padding/pos_embs", "input_preprocessor.padding.image")
+ name = name.replace("multimodal_preprocessor/label_mask_token/pos_embs", "input_preprocessor.mask.label")
+ name = name.replace("multimodal_preprocessor/label_padding/pos_embs", "input_preprocessor.padding.label")
+
+ # DECODERS
+ # rename prefix of decoders
+ # multimodal autoencoding model
+ name = name.replace(
+ "multimodal_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention."
+ )
+ name = name.replace("multimodal_decoder/~decoder_query/audio_padding/pos_embs", "decoder.padding.audio")
+ name = name.replace("multimodal_decoder/~decoder_query/image_padding/pos_embs", "decoder.padding.image")
+ name = name.replace("multimodal_decoder/~decoder_query/label_padding/pos_embs", "decoder.padding.label")
+ name = name.replace("multimodal_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias")
+ name = name.replace("multimodal_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight")
+ if architecture == "multimodal_autoencoding":
+ name = name.replace(
+ "classification_decoder/~/basic_decoder/~/trainable_position_encoding/pos_embs",
+ "decoder.modalities.label.decoder.output_position_encodings.position_embeddings",
+ )
+ # flow model
+ name = name.replace(
+ "flow_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention."
+ )
+ name = name.replace("flow_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight")
+ name = name.replace("flow_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias")
+ # image models
+ name = name.replace(
+ "classification_decoder/~/basic_decoder/~/trainable_position_encoding/pos_embs",
+ "decoder.decoder.output_position_encodings.position_embeddings",
+ )
+ name = name.replace(
+ "basic_decoder/~/trainable_position_encoding/pos_embs",
+ "decoder.output_position_encodings.position_embeddings",
+ )
+ name = name.replace(
+ "classification_decoder/~/basic_decoder/cross_attention/", "decoder.decoder.decoding_cross_attention."
+ )
+ name = name.replace("classification_decoder/~/basic_decoder/output/b", "decoder.decoder.final_layer.bias")
+ name = name.replace("classification_decoder/~/basic_decoder/output/w", "decoder.decoder.final_layer.weight")
+ name = name.replace("classification_decoder/~/basic_decoder/~/", "decoder.decoder.")
+ name = name.replace("basic_decoder/cross_attention/", "decoder.decoding_cross_attention.")
+ name = name.replace("basic_decoder/~/", "decoder.")
+
+ # POSTPROCESSORS
+ name = name.replace(
+ "projection_postprocessor/linear/b", "output_postprocessor.modalities.image.classifier.bias"
+ )
+ name = name.replace(
+ "projection_postprocessor/linear/w", "output_postprocessor.modalities.image.classifier.weight"
+ )
+ name = name.replace(
+ "classification_postprocessor/linear/b", "output_postprocessor.modalities.label.classifier.bias"
+ )
+ name = name.replace(
+ "classification_postprocessor/linear/w", "output_postprocessor.modalities.label.classifier.weight"
+ )
+ name = name.replace("audio_postprocessor/linear/b", "output_postprocessor.modalities.audio.classifier.bias")
+ name = name.replace("audio_postprocessor/linear/w", "output_postprocessor.modalities.audio.classifier.weight")
+
+ # PERCEIVER MODEL
+
+ # rename latent embeddings
+ name = name.replace("perceiver_encoder/~/trainable_position_encoding/pos_embs", "embeddings.latents")
+ # rename latent embeddings (for multimodal model)
+ name = name.replace("encoder/~/trainable_position_encoding/pos_embs", "embeddings.latents")
+
+ # rename prefixes
+ if name.startswith("perceiver_encoder/~/"):
+ if "self_attention" in name:
+ suffix = "self_attends."
+ else:
+ suffix = ""
+ name = name.replace("perceiver_encoder/~/", "encoder." + suffix)
+ if name.startswith("encoder/~/"):
+ if "self_attention" in name:
+ suffix = "self_attends."
+ else:
+ suffix = ""
+ name = name.replace("encoder/~/", "encoder." + suffix)
+ # rename layernorm parameters
+ if "offset" in name:
+ name = name.replace("offset", "bias")
+ if "scale" in name:
+ name = name.replace("scale", "weight")
+ # in HuggingFace, the layernorm in between attention + MLP is just called "layernorm"
+ # rename layernorm in between attention + MLP of cross-attention
+ if "cross_attention" in name and "layer_norm_2" in name:
+ name = name.replace("layer_norm_2", "layernorm")
+ # rename layernorm in between attention + MLP of self-attention
+ if "self_attention" in name and "layer_norm_1" in name:
+ name = name.replace("layer_norm_1", "layernorm")
+
+ # in HuggingFace, the layernorms for queries + keys are called "layernorm1" and "layernorm2"
+ if "cross_attention" in name and "layer_norm_1" in name:
+ name = name.replace("layer_norm_1", "attention.self.layernorm2")
+ if "cross_attention" in name and "layer_norm" in name:
+ name = name.replace("layer_norm", "attention.self.layernorm1")
+ if "self_attention" in name and "layer_norm" in name:
+ name = name.replace("layer_norm", "attention.self.layernorm1")
+
+ # rename special characters by dots
+ name = name.replace("-", ".")
+ name = name.replace("/", ".")
+ # rename keys, queries, values and output of attention layers
+ if ("cross_attention" in name or "self_attention" in name) and "mlp" not in name:
+ if "linear.b" in name:
+ name = name.replace("linear.b", "self.query.bias")
+ if "linear.w" in name:
+ name = name.replace("linear.w", "self.query.weight")
+ if "linear_1.b" in name:
+ name = name.replace("linear_1.b", "self.key.bias")
+ if "linear_1.w" in name:
+ name = name.replace("linear_1.w", "self.key.weight")
+ if "linear_2.b" in name:
+ name = name.replace("linear_2.b", "self.value.bias")
+ if "linear_2.w" in name:
+ name = name.replace("linear_2.w", "self.value.weight")
+ if "linear_3.b" in name:
+ name = name.replace("linear_3.b", "output.dense.bias")
+ if "linear_3.w" in name:
+ name = name.replace("linear_3.w", "output.dense.weight")
+ if "self_attention_" in name:
+ name = name.replace("self_attention_", "")
+ if "self_attention" in name:
+ name = name.replace("self_attention", "0")
+ # rename dense layers of 2-layer MLP
+ if "mlp" in name:
+ if "linear.b" in name:
+ name = name.replace("linear.b", "dense1.bias")
+ if "linear.w" in name:
+ name = name.replace("linear.w", "dense1.weight")
+ if "linear_1.b" in name:
+ name = name.replace("linear_1.b", "dense2.bias")
+ if "linear_1.w" in name:
+ name = name.replace("linear_1.w", "dense2.weight")
+
+ # finally, TRANSPOSE if kernel and not embedding layer, and set value
+ if name[-6:] == "weight" and "embeddings" not in name:
+ param = np.transpose(param)
+
+ # if batchnorm, we need to squeeze it
+ if "batchnorm" in name:
+ param = np.squeeze(param)
+
+ if "embedding_decoder" not in name:
+ state_dict["perceiver." + name] = torch.from_numpy(param)
+ else:
+ state_dict[name] = torch.from_numpy(param)
+
+
+@torch.no_grad()
+def convert_perceiver_checkpoint(pickle_file, pytorch_dump_folder_path, architecture="MLM"):
+ """
+ Copy/paste/tweak model's weights to our Perceiver structure.
+ """
+
+ # load parameters as FlatMapping data structure
+ with open(pickle_file, "rb") as f:
+ checkpoint = pickle.loads(f.read())
+
+ state = None
+ if isinstance(checkpoint, dict) and architecture in [
+ "image_classification",
+ "image_classification_fourier",
+ "image_classification_conv",
+ ]:
+ # the image classification_conv checkpoint also has batchnorm states (running_mean and running_var)
+ params = checkpoint["params"]
+ state = checkpoint["state"]
+ else:
+ params = checkpoint
+
+ # turn into initial state dict
+ state_dict = {}
+ for scope_name, parameters in hk.data_structures.to_mutable_dict(params).items():
+ for param_name, param in parameters.items():
+ state_dict[scope_name + "/" + param_name] = param
+
+ if state is not None:
+ # add state variables
+ for scope_name, parameters in hk.data_structures.to_mutable_dict(state).items():
+ for param_name, param in parameters.items():
+ state_dict[scope_name + "/" + param_name] = param
+
+ # rename keys
+ rename_keys(state_dict, architecture=architecture)
+
+ # load HuggingFace model
+ config = PerceiverConfig()
+ subsampling = None
+ repo_id = "huggingface/label-files"
+ if architecture == "MLM":
+ config.qk_channels = 8 * 32
+ config.v_channels = 1280
+ model = PerceiverForMaskedLM(config)
+ elif "image_classification" in architecture:
+ config.num_latents = 512
+ config.d_latents = 1024
+ config.d_model = 512
+ config.num_blocks = 8
+ config.num_self_attends_per_block = 6
+ config.num_cross_attention_heads = 1
+ config.num_self_attention_heads = 8
+ config.qk_channels = None
+ config.v_channels = None
+ # set labels
+ config.num_labels = 1000
+ filename = "imagenet-1k-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ if architecture == "image_classification":
+ config.image_size = 224
+ model = PerceiverForImageClassificationLearned(config)
+ elif architecture == "image_classification_fourier":
+ config.d_model = 261
+ model = PerceiverForImageClassificationFourier(config)
+ elif architecture == "image_classification_conv":
+ config.d_model = 322
+ model = PerceiverForImageClassificationConvProcessing(config)
+ else:
+ raise ValueError(f"Architecture {architecture} not supported")
+ elif architecture == "optical_flow":
+ config.num_latents = 2048
+ config.d_latents = 512
+ config.d_model = 322
+ config.num_blocks = 1
+ config.num_self_attends_per_block = 24
+ config.num_self_attention_heads = 16
+ config.num_cross_attention_heads = 1
+ model = PerceiverForOpticalFlow(config)
+ elif architecture == "multimodal_autoencoding":
+ config.num_latents = 28 * 28 * 1
+ config.d_latents = 512
+ config.d_model = 704
+ config.num_blocks = 1
+ config.num_self_attends_per_block = 8
+ config.num_self_attention_heads = 8
+ config.num_cross_attention_heads = 1
+ config.num_labels = 700
+ # define dummy inputs + subsampling (as each forward pass is only on a chunk of image + audio data)
+ images = torch.randn((1, 16, 3, 224, 224))
+ audio = torch.randn((1, 30720, 1))
+ nchunks = 128
+ image_chunk_size = np.prod((16, 224, 224)) // nchunks
+ audio_chunk_size = audio.shape[1] // config.samples_per_patch // nchunks
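+ # with these values: image_chunk_size = 16 * 224 * 224 // 128 = 6272 pixels per chunk and
+ # audio_chunk_size = 30720 // 16 // 128 = 15 audio patches per chunk, so 128 chunked forward
+ # passes cover the full video + audio reconstruction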
+ # process the first chunk
+ chunk_idx = 0
+ subsampling = {
+ "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)),
+ "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)),
+ "label": None,
+ }
+ model = PerceiverForMultimodalAutoencoding(config)
+ # set labels
+ filename = "kinetics700-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ else:
+ raise ValueError(f"Architecture {architecture} not supported")
+ model.eval()
+
+ # load weights
+ model.load_state_dict(state_dict)
+
+ # prepare dummy input
+ input_mask = None
+ if architecture == "MLM":
+ tokenizer = PerceiverTokenizer.from_pretrained("/Users/NielsRogge/Documents/Perceiver/Tokenizer files")
+ text = "This is an incomplete sentence where some words are missing."
+ encoding = tokenizer(text, padding="max_length", return_tensors="pt")
+ # mask " missing.". Note that the model performs much better if the masked chunk starts with a space.
+ encoding.input_ids[0, 51:60] = tokenizer.mask_token_id
+ inputs = encoding.input_ids
+ input_mask = encoding.attention_mask
+ elif architecture in ["image_classification", "image_classification_fourier", "image_classification_conv"]:
+ image_processor = PerceiverImageProcessor()
+ image = prepare_img()
+ encoding = image_processor(image, return_tensors="pt")
+ inputs = encoding.pixel_values
+ elif architecture == "optical_flow":
+ inputs = torch.randn(1, 2, 27, 368, 496)
+ elif architecture == "multimodal_autoencoding":
+ images = torch.randn((1, 16, 3, 224, 224))
+ audio = torch.randn((1, 30720, 1))
+ inputs = {"image": images, "audio": audio, "label": torch.zeros((images.shape[0], 700))}
+
+ # forward pass
+ if architecture == "multimodal_autoencoding":
+ outputs = model(inputs=inputs, attention_mask=input_mask, subsampled_output_points=subsampling)
+ else:
+ outputs = model(inputs=inputs, attention_mask=input_mask)
+ logits = outputs.logits
+
+ # verify logits
+ if not isinstance(logits, dict):
+ print("Shape of logits:", logits.shape)
+ else:
+ for k, v in logits.items():
+ print(f"Shape of logits of modality {k}", v.shape)
+
+ if architecture == "MLM":
+ expected_slice = torch.tensor(
+ [[-11.8336, -11.6850, -11.8483], [-12.8149, -12.5863, -12.7904], [-12.8440, -12.6410, -12.8646]]
+ )
+ assert torch.allclose(logits[0, :3, :3], expected_slice)
+ masked_tokens_predictions = logits[0, 51:60].argmax(dim=-1).tolist()
+ expected_list = [38, 115, 111, 121, 121, 111, 116, 109, 52]
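+ # with the byte-level Perceiver tokenizer these ids should decode back to the masked string " missing."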
+ assert masked_tokens_predictions == expected_list
+ print("Greedy predictions:")
+ print(masked_tokens_predictions)
+ print()
+ print("Predicted string:")
+ print(tokenizer.decode(masked_tokens_predictions))
+
+ elif architecture in ["image_classification", "image_classification_fourier", "image_classification_conv"]:
+ print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
+
+ # Finally, save files
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--pickle_file",
+ type=str,
+ default=None,
+ required=True,
+ help="Path to local pickle file of a Perceiver checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the output PyTorch model directory, provided as a string.",
+ )
+ parser.add_argument(
+ "--architecture",
+ default="MLM",
+ type=str,
+ help="""
+ Architecture, provided as a string. One of 'MLM', 'image_classification', 'image_classification_fourier',
+ 'image_classification_conv', 'optical_flow' or 'multimodal_autoencoding'.
+ """,
+ )
+
+ args = parser.parse_args()
+ convert_perceiver_checkpoint(args.pickle_file, args.pytorch_dump_folder_path, args.architecture)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/feature_extraction_perceiver.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/feature_extraction_perceiver.py
new file mode 100644
index 0000000000000000000000000000000000000000..35f2a6c5c9e72d44ec1b9fdb62aeb452e7581a4c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/feature_extraction_perceiver.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for Perceiver."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_perceiver import PerceiverImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class PerceiverFeatureExtractor(PerceiverImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use PerceiverImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/image_processing_perceiver.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/image_processing_perceiver.py
new file mode 100644
index 0000000000000000000000000000000000000000..02dd527e437be7e91f59f227354b01865db58ca8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/image_processing_perceiver.py
@@ -0,0 +1,367 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Perceiver."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import center_crop, resize, to_channel_dimension_format
+from ...image_utils import (
+ IMAGENET_DEFAULT_MEAN,
+ IMAGENET_DEFAULT_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+class PerceiverImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a Perceiver image processor.
+
+ Args:
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether or not to center crop the image. If the input size is smaller than `crop_size` along any edge, the
+ image will be padded with zeros and then center cropped. Can be overridden by the `do_center_crop`
+ parameter in the `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
+ Desired output size when applying center-cropping. Can be overridden by the `crop_size` parameter in the
+ `preprocess` method.
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image to `(size["height"], size["width"])`. Can be overridden by the `do_resize`
+ parameter in the `preprocess` method.
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the image after resizing. Can be overridden by the `size` parameter in the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
+ in the `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
+ in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_center_crop: bool = True,
+ crop_size: Optional[Dict[str, int]] = None,
+ do_resize: bool = True,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+ size = size if size is not None else {"height": 224, "width": 224}
+ size = get_size_dict(size)
+
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
+ self._valid_processor_keys = [
+ "images",
+ "do_center_crop",
+ "crop_size",
+ "do_resize",
+ "size",
+ "resample",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def center_crop(
+ self,
+ image: np.ndarray,
+ crop_size: Dict[str, int],
+ size: Optional[Dict[str, int]] = None,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Center crop an image to `(size["height"] / crop_size["height"] * min_dim, size["width"] / crop_size["width"] *
+ min_dim)`, where `min_dim = min(height, width)` is the smaller spatial dimension of the input image.
+
+ If the input size is smaller than `crop_size` along any edge, the image will be padded with zeros and then
+ center cropped.
+
+ Args:
+ image (`np.ndarray`):
+ Image to center crop.
+ crop_size (`Dict[str, int]`):
+ Desired output size after applying the center crop.
+ size (`Dict[str, int]`, *optional*):
+ Size of the image after resizing. If not provided, the self.size attribute will be used.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
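+
+ For example, with the defaults `size={"height": 224, "width": 224}` and `crop_size={"height": 256, "width": 256}`,
+ a 480x640 image is center cropped to (224 / 256) * 480 = 420 pixels on each side; the subsequent resize to
+ 224x224 in `preprocess` then reproduces the usual "resize the short side to 256, crop 224" recipe independently
+ of the input resolution.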
+ """
+ size = self.size if size is None else size
+ size = get_size_dict(size)
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ height, width = get_image_size(image, channel_dim=input_data_format)
+ min_dim = min(height, width)
+ cropped_height = (size["height"] / crop_size["height"]) * min_dim
+ cropped_width = (size["width"] / crop_size["width"]) * min_dim
+ return center_crop(
+ image,
+ size=(cropped_height, cropped_width),
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ size = get_size_dict(size)
+ if "height" not in size or "width" not in size:
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
+ output_size = (size["height"], size["width"])
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_center_crop: Optional[bool] = None,
+ crop_size: Optional[Dict[str, int]] = None,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image to `crop_size`.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Desired output size after applying the center crop.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ size = get_size_dict(size)
+ resample = resample if resample is not None else self.resample
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_center_crop:
+ images = [
+ self.center_crop(image, crop_size, size=size, input_data_format=input_data_format) for image in images
+ ]
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
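+
+
+ # Illustrative usage sketch (the image path is a placeholder): the processor follows the usual
+ # HuggingFace image-processor API, e.g.
+ #
+ #     from PIL import Image
+ #     from transformers import PerceiverImageProcessor
+ #
+ #     image_processor = PerceiverImageProcessor()
+ #     batch = image_processor(images=Image.open("example.jpg"), return_tensors="pt")
+ #     # for an RGB input, batch.pixel_values has shape (1, 3, 224, 224) with the default size/crop settings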
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/modeling_perceiver.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/modeling_perceiver.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb7ac2bc3139e13c7cc99213c9563f11c7831016
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/modeling_perceiver.py
@@ -0,0 +1,3437 @@
+# coding=utf-8
+# Copyright 2021 Deepmind and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Perceiver model."""
+
+import abc
+import math
+from dataclasses import dataclass
+from functools import reduce
+from operator import __add__
+from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutputWithCrossAttentions
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_perceiver import PerceiverConfig
+
+
+ModalitySizeType = Mapping[str, int]
+PreprocessorOutputType = Tuple[torch.Tensor, Optional[torch.Tensor], torch.Tensor]
+PreprocessorType = Callable[..., PreprocessorOutputType]
+PostprocessorType = Callable[..., Any]
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "deepmind/language-perceiver"
+_CONFIG_FOR_DOC = "PerceiverConfig"
+
+PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "deepmind/language-perceiver",
+ # See all Perceiver models at https://huggingface.co/models?filter=perceiver
+]
+
+
+@dataclass
+class PerceiverModelOutput(ModelOutput):
+ """
+ Base class for Perceiver base model's outputs, with potential hidden states, attentions and cross-attentions.
+
+ Args:
+ logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
+ used to compute the weighted average in the cross-attention heads.
+ """
+
+ logits: torch.FloatTensor = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class PerceiverDecoderOutput(ModelOutput):
+ """
+ Base class for Perceiver decoder outputs, with potential cross-attentions.
+
+ Args:
+ logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
+ Output of the basic decoder.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
+ used to compute the weighted average in the cross-attention heads.
+ """
+
+ logits: torch.FloatTensor = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class PerceiverMaskedLMOutput(ModelOutput):
+ """
+ Base class for Perceiver's masked language model outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Masked language modeling (MLM) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_latents,
+ num_latents)`. Attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
+ used to compute the weighted average in the cross-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class PerceiverClassifierOutput(ModelOutput):
+ """
+ Base class for Perceiver's outputs of sequence/image classification models, optical flow and multimodal
+ autoencoding.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
+ used to compute the weighted average in the cross-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class PerceiverEmbeddings(nn.Module):
+ """Construct the latent embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents))
+
+ def forward(self, batch_size: int):
+ return self.latents.expand(batch_size, -1, -1) # Thanks, Phil Wang
+
+
+class PerceiverSelfAttention(nn.Module):
+ """Multi-headed {cross, self}-attention. Can be used both in the encoder as well as in the decoder."""
+
+ def __init__(
+ self,
+ config,
+ is_cross_attention=False,
+ qk_channels=None,
+ v_channels=None,
+ num_heads=1,
+ q_dim=None,
+ kv_dim=None,
+ ):
+ super().__init__()
+ self.num_heads = num_heads
+ # Q and K must have the same number of channels.
+ # Default to preserving Q's input's shape.
+ if qk_channels is None:
+ qk_channels = q_dim
+ # V's num_channels determines the shape of the output of QKV-attention.
+ # Default to the same number of channels used in the key-query operation.
+ if v_channels is None:
+ v_channels = qk_channels
+ if qk_channels % num_heads != 0:
+ raise ValueError(f"qk_channels ({qk_channels}) must be divisible by num_heads ({num_heads}).")
+ if v_channels % num_heads != 0:
+ raise ValueError(f"v_channels ({v_channels}) must be divisible by num_heads ({num_heads}).")
+
+ self.qk_channels = qk_channels
+ self.v_channels = v_channels
+ self.qk_channels_per_head = self.qk_channels // num_heads
+ self.v_channels_per_head = self.v_channels // num_heads
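+ # e.g. the deepmind/language-perceiver checkpoint (see the conversion script) uses qk_channels=256 and
+ # v_channels=1280 with 8 self-attention heads, i.e. 32 query/key channels and 160 value channels per head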
+
+ # Layer normalization
+ self.layernorm1 = nn.LayerNorm(q_dim)
+ self.layernorm2 = nn.LayerNorm(kv_dim) if is_cross_attention else nn.Identity()
+
+ # Projection matrices
+ self.query = nn.Linear(q_dim, qk_channels)
+ self.key = nn.Linear(kv_dim, qk_channels)
+ self.value = nn.Linear(kv_dim, v_channels)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x, channels_per_head):
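+ # reshape (batch_size, seq_len, channels) -> (batch_size, num_heads, seq_len, channels_per_head)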
+ new_x_shape = x.size()[:-1] + (self.num_heads, channels_per_head)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs: Optional[torch.FloatTensor] = None,
+ inputs_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ hidden_states = self.layernorm1(hidden_states)
+ inputs = self.layernorm2(inputs)
+
+ # Project queries, keys and values to a common feature dimension. If this is instantiated as a cross-attention module,
+ # the keys and values come from the inputs; the attention mask needs to be such that the inputs' non-relevant tokens are not attended to.
+ is_cross_attention = inputs is not None
+ queries = self.query(hidden_states)
+
+ if is_cross_attention:
+ keys = self.key(inputs)
+ values = self.value(inputs)
+ attention_mask = inputs_mask
+ else:
+ keys = self.key(hidden_states)
+ values = self.value(hidden_states)
+
+ # Reshape channels for multi-head attention.
+ # We reshape from (batch_size, time, channels) to (batch_size, num_heads, time, channels per head)
+ queries = self.transpose_for_scores(queries, self.qk_channels_per_head)
+ keys = self.transpose_for_scores(keys, self.qk_channels_per_head)
+ values = self.transpose_for_scores(values, self.v_channels_per_head)
+
+ # Take the dot product between the queries and keys to get the raw attention scores.
+ attention_scores = torch.matmul(queries, keys.transpose(-1, -2))
+
+ batch_size, num_heads, seq_len, q_head_dim = queries.shape
+ _, _, _, v_head_dim = values.shape
+ hiddens = self.num_heads * v_head_dim
+
+ attention_scores = attention_scores / math.sqrt(q_head_dim)
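+ # standard scaled dot-product attention: softmax(Q K^T / sqrt(d_head)) V, with
+ # d_head = qk_channels // num_heads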
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in PerceiverModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, values)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (hiddens,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+class PerceiverSelfOutput(nn.Module):
+ def __init__(self, config, input_channels, output_channels):
+ super().__init__()
+ self.dense = nn.Linear(input_channels, output_channels)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ return hidden_states
+
+
+class PerceiverAttention(nn.Module):
+ """Attention module, including a dense block."""
+
+ def __init__(
+ self,
+ config,
+ is_cross_attention=False,
+ qk_channels=None,
+ v_channels=None,
+ num_heads=1,
+ q_dim=None,
+ kv_dim=None,
+ use_query_residual=True,
+ ):
+ super().__init__()
+ # MultiHead attention
+ if is_cross_attention and qk_channels is None:
+ if config.cross_attention_shape_for_attention == "q":
+ qk_channels = q_dim
+ elif config.cross_attention_shape_for_attention == "kv":
+ qk_channels = kv_dim
+ else:
+ raise ValueError(
+ f"Unknown value {config.cross_attention_shape_for_attention} for "
+ "cross_attention_shape_for_attention."
+ )
+ else:
+ if qk_channels is None:
+ qk_channels = q_dim
+ if v_channels is None:
+ v_channels = qk_channels
+ self.self = PerceiverSelfAttention(
+ config,
+ is_cross_attention=is_cross_attention,
+ qk_channels=qk_channels,
+ v_channels=v_channels,
+ num_heads=num_heads,
+ q_dim=q_dim,
+ kv_dim=kv_dim,
+ )
+ # dense block
+ if is_cross_attention:
+ output_channels = q_dim
+ else:
+ output_channels = v_channels
+ self.output = PerceiverSelfOutput(config, input_channels=self.self.v_channels, output_channels=output_channels)
+ self.use_query_residual = use_query_residual
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs: Optional[torch.FloatTensor] = None,
+ inputs_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ inputs,
+ inputs_mask,
+ output_attentions,
+ )
+
+ # Output projection
+ attention_output = self.output(self_outputs[0])
+
+ # Optionally include a residual to the original queries.
+ # Consider omitting the residual if the semantics of query and output
+ # are different, e.g. if queries are positions and outputs are pixels.
+ if self.use_query_residual:
+ attention_output = attention_output + hidden_states
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class PerceiverMLP(nn.Module):
+ """A Transformer-style dense module to follow attention."""
+
+ def __init__(self, config, input_size, widening_factor):
+ super().__init__()
+ self.dense1 = nn.Linear(input_size, widening_factor * input_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.dense2 = nn.Linear(widening_factor * input_size, input_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense1(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.dense2(hidden_states)
+ return hidden_states
+
+
+class PerceiverLayer(nn.Module):
+ def __init__(
+ self,
+ config,
+ is_cross_attention=False,
+ qk_channels=None,
+ v_channels=None,
+ num_heads=1,
+ q_dim=None,
+ kv_dim=None,
+ widening_factor=4,
+ use_query_residual=True,
+ ):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = PerceiverAttention(
+ config,
+ is_cross_attention=is_cross_attention,
+ qk_channels=qk_channels,
+ v_channels=v_channels,
+ num_heads=num_heads,
+ q_dim=q_dim,
+ kv_dim=kv_dim,
+ use_query_residual=use_query_residual,
+ )
+ self.layernorm = nn.LayerNorm(q_dim)
+ self.mlp = PerceiverMLP(config, input_size=q_dim, widening_factor=widening_factor)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs: Optional[torch.FloatTensor] = None,
+ inputs_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ inputs,
+ inputs_mask,
+ output_attentions,
+ )
+ attention_output = attention_outputs[0]
+
+ outputs = attention_outputs[1:] # add attentions if we output attention weights
+
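+        # Apply the post-attention layernorm + MLP, optionally chunked along the sequence
+        # dimension (`chunk_size_feed_forward`) to reduce peak memory usage.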
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+
+ layer_output = layer_output + attention_output # residual connection
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ layer_output = self.layernorm(attention_output)
+ layer_output = self.mlp(layer_output)
+ return layer_output
+
+
+class PerceiverEncoder(nn.Module):
+ """The Perceiver Encoder: a scalable, fully attentional encoder."""
+
+ def __init__(self, config, kv_dim=None):
+ super().__init__()
+ self.config = config
+
+ # Check that we can use multihead-attention with these shapes.
+ if config.d_latents % config.num_self_attention_heads != 0:
+            raise ValueError(
+                f"d_latents ({config.d_latents}) must be divisible by"
+                f" num_self_attention_heads ({config.num_self_attention_heads})."
+            )
+ if config.d_latents % config.num_cross_attention_heads != 0:
+            raise ValueError(
+                f"d_latents ({config.d_latents}) must be divisible by"
+                f" num_cross_attention_heads ({config.num_cross_attention_heads})."
+            )
+
+ # Construct the cross attention layer.
+ self.cross_attention = PerceiverLayer(
+ config,
+ is_cross_attention=True,
+ qk_channels=config.qk_channels,
+ v_channels=config.v_channels,
+ num_heads=config.num_cross_attention_heads,
+ q_dim=config.d_latents,
+ kv_dim=kv_dim,
+ widening_factor=config.cross_attention_widening_factor,
+ use_query_residual=config.use_query_residual,
+ )
+
+ # Construct a single block of self-attention layers.
+ # We get deeper architectures by applying this block more than once.
+ self_attention_layers = []
+ for _ in range(config.num_self_attends_per_block):
+ layer = PerceiverLayer(
+ config,
+ is_cross_attention=False,
+ qk_channels=config.qk_channels,
+ v_channels=config.v_channels,
+ num_heads=config.num_self_attention_heads,
+ q_dim=config.d_latents,
+ kv_dim=config.d_latents,
+ widening_factor=config.self_attention_widening_factor,
+ )
+ self_attention_layers.append(layer)
+
+ self.self_attends = nn.ModuleList(self_attention_layers)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs: Optional[torch.FloatTensor] = None,
+ inputs_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions else None
+
+ # Apply the cross-attention between the latents (hidden_states) and inputs:
+ layer_outputs = self.cross_attention(
+ hidden_states,
+ attention_mask=attention_mask,
+ head_mask=None,
+ inputs=inputs,
+ inputs_mask=inputs_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[1],)
+
+ # Apply the block of self-attention layers more than once:
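+        # Note that the same layers (and therefore the same weights) are reused in every block.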
+ for _ in range(self.config.num_blocks):
+ for i, layer_module in enumerate(self.self_attends):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask=attention_mask,
+ head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class PerceiverPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = PerceiverConfig
+ base_model_prefix = "perceiver"
+ main_input_name = "inputs"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif hasattr(module, "latents"):
+ module.latents.data.normal_(mean=0.0, std=self.config.initializer_range)
+ elif hasattr(module, "position_embeddings") and isinstance(module, PerceiverTrainablePositionEncoding):
+ module.position_embeddings.data.normal_(mean=0.0, std=self.config.initializer_range)
+ elif isinstance(module, nn.ParameterDict):
+ for modality in module.keys():
+ module[modality].data.normal_(mean=0.0, std=self.config.initializer_range)
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+PERCEIVER_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`PerceiverConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+PERCEIVER_MODEL_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`PerceiverConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ decoder (*DecoderType*, *optional*):
+            Optional decoder used to decode the latent representation of the encoder. Examples include
+ *transformers.models.perceiver.modeling_perceiver.PerceiverBasicDecoder*,
+ *transformers.models.perceiver.modeling_perceiver.PerceiverClassificationDecoder*,
+ *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder*.
+ input_preprocessor (*PreprocessorType*, *optional*):
+ Optional input preprocessor to use. Examples include
+ *transformers.models.perceiver.modeling_perceiver.PerceiverImagePreprocessor*,
+ *transformers.models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor*,
+ *transformers.models.perceiver.modeling_perceiver.PerceiverTextPreprocessor*,
+ *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor*.
+ output_postprocessor (*PostprocessorType*, *optional*):
+ Optional output postprocessor to use. Examples include
+ *transformers.models.perceiver.modeling_perceiver.PerceiverImagePostprocessor*,
+ *transformers.models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor*,
+ *transformers.models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor*,
+ *transformers.models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor*,
+ *transformers.models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor*.
+
+ Note that you can define your own decoders, preprocessors and/or postprocessors to fit your use-case.
+"""
+
+PERCEIVER_INPUTS_DOCSTRING = r"""
+ Args:
+ inputs (`torch.FloatTensor`):
+ Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
+ attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ """The Perceiver: a scalable, fully attentional architecture.""",
+ PERCEIVER_MODEL_START_DOCSTRING,
+)
+class PerceiverModel(PerceiverPreTrainedModel):
+ def __init__(
+ self,
+ config,
+ decoder=None,
+ input_preprocessor: PreprocessorType = None,
+ output_postprocessor: PostprocessorType = None,
+ ):
+ super().__init__(config)
+ self.config = config
+
+ self.input_preprocessor = input_preprocessor
+ self.output_postprocessor = output_postprocessor
+ self.embeddings = PerceiverEmbeddings(config)
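+        # The encoder cross-attends from the latents to the (preprocessed) inputs, so its
+        # key/value dimension follows the preprocessor's output channels when one is provided.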
+ self.encoder = PerceiverEncoder(
+ config, kv_dim=input_preprocessor.num_channels if input_preprocessor is not None else config.d_model
+ )
+ self.decoder = decoder
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.latents
+
+ def set_input_embeddings(self, value):
+ self.embeddings.latents = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+        base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+            self.encoder.self_attends[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
+ @replace_return_docstrings(output_type=PerceiverModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ inputs: torch.FloatTensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ subsampled_output_points: Optional[Dict[str, torch.Tensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, PerceiverModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import PerceiverConfig, PerceiverTokenizer, PerceiverImageProcessor, PerceiverModel
+ >>> from transformers.models.perceiver.modeling_perceiver import (
+ ... PerceiverTextPreprocessor,
+ ... PerceiverImagePreprocessor,
+ ... PerceiverClassificationDecoder,
+ ... )
+ >>> import torch
+ >>> import requests
+ >>> from PIL import Image
+
+ >>> # EXAMPLE 1: using the Perceiver to classify texts
+ >>> # - we define a TextPreprocessor, which can be used to embed tokens
+ >>> # - we define a ClassificationDecoder, which can be used to decode the
+ >>> # final hidden states of the latents to classification logits
+ >>> # using trainable position embeddings
+ >>> config = PerceiverConfig()
+ >>> preprocessor = PerceiverTextPreprocessor(config)
+ >>> decoder = PerceiverClassificationDecoder(
+ ... config,
+ ... num_channels=config.d_latents,
+ ... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
+ ... use_query_residual=True,
+ ... )
+ >>> model = PerceiverModel(config, input_preprocessor=preprocessor, decoder=decoder)
+
+ >>> # you can then do a forward pass as follows:
+ >>> tokenizer = PerceiverTokenizer()
+ >>> text = "hello world"
+ >>> inputs = tokenizer(text, return_tensors="pt").input_ids
+
+ >>> with torch.no_grad():
+ ... outputs = model(inputs=inputs)
+ >>> logits = outputs.logits
+ >>> list(logits.shape)
+ [1, 2]
+
+        >>> # to train, one can use standard cross-entropy:
+ >>> criterion = torch.nn.CrossEntropyLoss()
+
+ >>> labels = torch.tensor([1])
+ >>> loss = criterion(logits, labels)
+
+ >>> # EXAMPLE 2: using the Perceiver to classify images
+ >>> # - we define an ImagePreprocessor, which can be used to embed images
+ >>> config = PerceiverConfig(image_size=224)
+ >>> preprocessor = PerceiverImagePreprocessor(
+ ... config,
+ ... prep_type="conv1x1",
+ ... spatial_downsample=1,
+ ... out_channels=256,
+ ... position_encoding_type="trainable",
+ ... concat_or_add_pos="concat",
+ ... project_pos_dim=256,
+ ... trainable_position_encoding_kwargs=dict(
+ ... num_channels=256,
+ ... index_dims=config.image_size**2,
+ ... ),
+ ... )
+
+ >>> model = PerceiverModel(
+ ... config,
+ ... input_preprocessor=preprocessor,
+ ... decoder=PerceiverClassificationDecoder(
+ ... config,
+ ... num_channels=config.d_latents,
+ ... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
+ ... use_query_residual=True,
+ ... ),
+ ... )
+
+ >>> # you can then do a forward pass as follows:
+ >>> image_processor = PerceiverImageProcessor()
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> inputs = image_processor(image, return_tensors="pt").pixel_values
+
+ >>> with torch.no_grad():
+ ... outputs = model(inputs=inputs)
+ >>> logits = outputs.logits
+ >>> list(logits.shape)
+ [1, 2]
+
+        >>> # to train, one can use standard cross-entropy:
+ >>> criterion = torch.nn.CrossEntropyLoss()
+
+ >>> labels = torch.tensor([1])
+ >>> loss = criterion(logits, labels)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.input_preprocessor is not None:
+ inputs, modality_sizes, inputs_without_pos = self.input_preprocessor(inputs)
+ else:
+ modality_sizes = None
+ inputs_without_pos = None
+ if inputs.size()[-1] != self.config.d_model:
+ raise ValueError(
+ f"Last dimension of the inputs: {inputs.size()[-1]} doesn't correspond to config.d_model:"
+ f" {self.config.d_model}. Make sure to set config.d_model appropriately."
+ )
+
+ batch_size, seq_length, _ = inputs.size()
+ device = inputs.device
+
+        # If no attention mask is provided, default to an all-ones mask
+ if attention_mask is None:
+ attention_mask = torch.ones((batch_size, seq_length), device=device)
+ # Make the attention mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ extended_attention_mask = self.invert_attention_mask(attention_mask)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_blocks x num_heads]
+ # and head_mask is converted to shape [num_blocks x batch x num_heads x N x N]
+ head_mask = self.get_head_mask(head_mask, self.config.num_blocks * self.config.num_self_attends_per_block)
+
+ embedding_output = self.embeddings(batch_size=batch_size)
+
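+        # The latent queries have a fixed length, so no attention_mask is passed to the encoder;
+        # the inverted input mask only masks the cross-attention inputs (`inputs_mask`).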
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=None,
+ head_mask=head_mask,
+ inputs=inputs,
+ inputs_mask=extended_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ logits = None
+ if self.decoder:
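+            # When only a subset of output points is decoded (e.g. chunked video autoencoding),
+            # the expected output size per modality is derived from the subsampled indices.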
+ if subsampled_output_points is not None:
+ output_modality_sizes = {
+ "audio": subsampled_output_points["audio"].shape[0],
+ "image": subsampled_output_points["image"].shape[0],
+ "label": 1,
+ }
+ else:
+ output_modality_sizes = modality_sizes
+ decoder_query = self.decoder.decoder_query(
+ inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_output_points
+ )
+ decoder_outputs = self.decoder(
+ decoder_query,
+ z=sequence_output,
+ query_mask=extended_attention_mask,
+ output_attentions=output_attentions,
+ )
+ logits = decoder_outputs.logits
+
+ # add cross-attentions of decoder
+ if output_attentions and decoder_outputs.cross_attentions is not None:
+ if return_dict:
+ encoder_outputs.cross_attentions = (
+ encoder_outputs.cross_attentions + decoder_outputs.cross_attentions
+ )
+ else:
+ encoder_outputs = encoder_outputs + decoder_outputs.cross_attentions
+
+ if self.output_postprocessor:
+ logits = self.output_postprocessor(logits, modality_sizes=output_modality_sizes)
+
+ if not return_dict:
+ if logits is not None:
+ return (logits, sequence_output) + encoder_outputs[1:]
+ else:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return PerceiverModelOutput(
+ logits=logits,
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings("""Example use of Perceiver for masked language modeling.""", PERCEIVER_START_DOCSTRING)
+class PerceiverForMaskedLM(PerceiverPreTrainedModel):
+ def __init__(self, config: PerceiverConfig):
+ super().__init__(config)
+
+ text_preprocessor = PerceiverTextPreprocessor(config)
+
+ trainable_position_encoding_kwargs_decoder = {
+ "num_channels": text_preprocessor.num_channels,
+ "index_dims": config.max_position_embeddings,
+ }
+
+ self.perceiver = PerceiverModel(
+ config,
+ input_preprocessor=text_preprocessor,
+ decoder=PerceiverBasicDecoder(
+ config,
+ output_num_channels=config.d_latents,
+ output_index_dims=config.max_position_embeddings, # we need to define the seq_len of the inputs beforehand
+ num_channels=text_preprocessor.num_channels,
+ qk_channels=8 * 32,
+ v_channels=text_preprocessor.num_channels,
+ num_heads=8,
+ use_query_residual=False,
+ final_project=False,
+ trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
+ ),
+ )
+ self.embedding_decoder = PerceiverEmbeddingDecoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=PerceiverMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ input_ids: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, PerceiverMaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, PerceiverForMaskedLM
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
+ >>> model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")
+
+ >>> # training
+ >>> text = "This is an incomplete sentence where some words are missing."
+ >>> inputs = tokenizer(text, padding="max_length", return_tensors="pt")
+ >>> # mask " missing."
+ >>> inputs["input_ids"][0, 52:61] = tokenizer.mask_token_id
+ >>> labels = tokenizer(text, padding="max_length", return_tensors="pt").input_ids
+
+ >>> outputs = model(**inputs, labels=labels)
+ >>> loss = outputs.loss
+ >>> round(loss.item(), 2)
+ 19.87
+
+ >>> logits = outputs.logits
+ >>> list(logits.shape)
+ [1, 2048, 262]
+
+ >>> # inference
+ >>> text = "This is an incomplete sentence where some words are missing."
+ >>> encoding = tokenizer(text, padding="max_length", return_tensors="pt")
+
+ >>> # mask bytes corresponding to " missing.". Note that the model performs much better if the masked span starts with a space.
+ >>> encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id
+
+ >>> # forward pass
+ >>> with torch.no_grad():
+ ... outputs = model(**encoding)
+ >>> logits = outputs.logits
+ >>> list(logits.shape)
+ [1, 2048, 262]
+
+ >>> masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist()
+ >>> tokenizer.decode(masked_tokens_predictions)
+ ' missing.'
+ ```"""
+ if inputs is not None and input_ids is not None:
+ raise ValueError("You cannot use both `inputs` and `input_ids`")
+ elif inputs is None and input_ids is not None:
+ inputs = input_ids
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.perceiver(
+ inputs=inputs,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
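+        # The embedding decoder maps the decoder output to vocabulary logits using the text
+        # preprocessor's embedding matrix (shared weights instead of a separate LM head).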
+ logits = self.embedding_decoder(
+ outputs.logits if return_dict else outputs[0], embedding_layer=self.perceiver.input_preprocessor.embeddings
+ )
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return PerceiverMaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings("""Example use of Perceiver for text classification.""", PERCEIVER_START_DOCSTRING)
+class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
+
+ self.num_labels = config.num_labels
+ self.perceiver = PerceiverModel(
+ config,
+ input_preprocessor=PerceiverTextPreprocessor(config),
+ decoder=PerceiverClassificationDecoder(
+ config,
+ num_channels=config.d_latents,
+ trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
+ use_query_residual=True,
+ ),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ input_ids: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, PerceiverClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels -
+            1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels >
+ 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, PerceiverForSequenceClassification
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
+ >>> model = PerceiverForSequenceClassification.from_pretrained("deepmind/language-perceiver")
+
+ >>> text = "hello world"
+ >>> inputs = tokenizer(text, return_tensors="pt").input_ids
+ >>> outputs = model(inputs=inputs)
+ >>> logits = outputs.logits
+ >>> list(logits.shape)
+ [1, 2]
+ ```"""
+ if inputs is not None and input_ids is not None:
+ raise ValueError("You cannot use both `inputs` and `input_ids`")
+ elif inputs is None and input_ids is not None:
+ inputs = input_ids
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.perceiver(
+ inputs=inputs,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ logits = outputs.logits if return_dict else outputs[0]
+
+ loss = None
+ if labels is not None:
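+            # Infer the problem type once from num_labels and the label dtype, then cache it on the config.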
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return PerceiverClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+Example use of Perceiver for image classification, for tasks such as ImageNet.
+
+This model uses learned position embeddings. In other words, this model is not given any privileged information about
+the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet.
+
+[`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
+(with `prep_type="conv1x1"`) to preprocess the input images, and
+[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
+[`PerceiverModel`] into classification logits.
+""",
+ PERCEIVER_START_DOCSTRING,
+)
+class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ trainable_position_encoding_kwargs_preprocessor = {"num_channels": 256, "index_dims": config.image_size**2}
+ trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
+
+ self.num_labels = config.num_labels
+ self.perceiver = PerceiverModel(
+ config,
+ input_preprocessor=PerceiverImagePreprocessor(
+ config,
+ prep_type="conv1x1",
+ spatial_downsample=1,
+ out_channels=256,
+ position_encoding_type="trainable",
+ concat_or_add_pos="concat",
+ project_pos_dim=256,
+ trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_preprocessor,
+ ),
+ decoder=PerceiverClassificationDecoder(
+ config,
+ num_channels=config.d_latents,
+ trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
+ use_query_residual=True,
+ ),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ pixel_values: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, PerceiverClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationLearned
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-learned")
+ >>> model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
+ >>> outputs = model(inputs=inputs)
+ >>> logits = outputs.logits
+ >>> list(logits.shape)
+ [1, 1000]
+
+ >>> # model predicts one of the 1000 ImageNet classes
+ >>> predicted_class_idx = logits.argmax(-1).item()
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
+ Predicted class: tabby, tabby cat
+ ```"""
+ if inputs is not None and pixel_values is not None:
+ raise ValueError("You cannot use both `inputs` and `pixel_values`")
+ elif inputs is None and pixel_values is not None:
+ inputs = pixel_values
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.perceiver(
+ inputs=inputs,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ logits = outputs.logits if return_dict else outputs[0]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return PerceiverClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+Example use of Perceiver for image classification, for tasks such as ImageNet.
+
+This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of
+79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).
+
+[`PerceiverForImageClassificationFourier`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
+(with `prep_type="pixels"`) to preprocess the input images, and
+[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
+[`PerceiverModel`] into classification logits.
+""",
+ PERCEIVER_START_DOCSTRING,
+)
+class PerceiverForImageClassificationFourier(PerceiverPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ fourier_position_encoding_kwargs_preprocessor = {
+ "concat_pos": True,
+ "max_resolution": (224, 224),
+ "num_bands": 64,
+ "sine_only": False,
+ }
+ trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
+
+ self.num_labels = config.num_labels
+ self.perceiver = PerceiverModel(
+ config,
+ input_preprocessor=PerceiverImagePreprocessor(
+ config,
+ prep_type="pixels",
+ spatial_downsample=1,
+ fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
+ ),
+ decoder=PerceiverClassificationDecoder(
+ config,
+ num_channels=config.d_latents,
+ trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
+ use_query_residual=True,
+ ),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ pixel_values: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, PerceiverClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationFourier
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-fourier")
+ >>> model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
+ >>> outputs = model(inputs=inputs)
+ >>> logits = outputs.logits
+ >>> list(logits.shape)
+ [1, 1000]
+
+ >>> # model predicts one of the 1000 ImageNet classes
+ >>> predicted_class_idx = logits.argmax(-1).item()
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
+ Predicted class: tabby, tabby cat
+ ```"""
+ if inputs is not None and pixel_values is not None:
+ raise ValueError("You cannot use both `inputs` and `pixel_values`")
+ elif inputs is None and pixel_values is not None:
+ inputs = pixel_values
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.perceiver(
+ inputs=inputs,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ logits = outputs.logits if return_dict else outputs[0]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return PerceiverClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+Example use of Perceiver for image classification, for tasks such as ImageNet.
+
+This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy
+of 82.1 on ImageNet.
+
+[`PerceiverForImageClassificationConvProcessing`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
+(with `prep_type="conv"`) to preprocess the input images, and
+[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
+[`PerceiverModel`] into classification logits.
+""",
+ PERCEIVER_START_DOCSTRING,
+)
+class PerceiverForImageClassificationConvProcessing(PerceiverPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ fourier_position_encoding_kwargs_preprocessor = {
+ "concat_pos": True,
+ "max_resolution": (56, 56),
+ "num_bands": 64,
+ "sine_only": False,
+ }
+ trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
+
+ self.num_labels = config.num_labels
+ self.perceiver = PerceiverModel(
+ config,
+ input_preprocessor=PerceiverImagePreprocessor(
+ config,
+ prep_type="conv",
+ spatial_downsample=1,
+ position_encoding_type="fourier",
+ fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
+ ),
+ decoder=PerceiverClassificationDecoder(
+ config,
+ num_channels=config.d_latents,
+ trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
+ use_query_residual=True,
+ ),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ pixel_values: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, PerceiverClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, PerceiverForImageClassificationConvProcessing
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")
+ >>> model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
+
+ >>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
+ >>> outputs = model(inputs=inputs)
+ >>> logits = outputs.logits
+ >>> list(logits.shape)
+ [1, 1000]
+
+ >>> # model predicts one of the 1000 ImageNet classes
+ >>> predicted_class_idx = logits.argmax(-1).item()
+ >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
+ Predicted class: tabby, tabby cat
+ ```"""
+ if inputs is not None and pixel_values is not None:
+ raise ValueError("You cannot use both `inputs` and `pixel_values`")
+ elif inputs is None and pixel_values is not None:
+ inputs = pixel_values
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.perceiver(
+ inputs=inputs,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ logits = outputs.logits if return_dict else outputs[0]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return PerceiverClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. [`PerceiverForOpticalFlow`] uses
+[`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with *prep_type="patches"*) to preprocess the
+input images, and [`~models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder`] to decode the latent
+representation of [`PerceiverModel`].
+
+As input, one concatenates 2 subsequent frames along the channel dimension and extracts a 3 x 3 patch around each pixel
+(leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position
+of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation
+using the same encoding used for the input.
+""",
+ PERCEIVER_START_DOCSTRING,
+)
+class PerceiverForOpticalFlow(PerceiverPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ fourier_position_encoding_kwargs_preprocessor = {
+ "num_bands": 64,
+ "max_resolution": config.train_size,
+ "sine_only": False,
+ "concat_pos": True,
+ }
+ fourier_position_encoding_kwargs_decoder = {
+ "concat_pos": True,
+ "max_resolution": config.train_size,
+ "num_bands": 64,
+ "sine_only": False,
+ }
+
+ image_preprocessor = PerceiverImagePreprocessor(
+ config,
+ prep_type="patches",
+ spatial_downsample=1,
+ conv_after_patching=True,
+ conv_after_patching_in_channels=54,
+ temporal_downsample=2,
+ position_encoding_type="fourier",
+ # position_encoding_kwargs
+ fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
+ )
+
+ self.perceiver = PerceiverModel(
+ config,
+ input_preprocessor=image_preprocessor,
+ decoder=PerceiverOpticalFlowDecoder(
+ config,
+ num_channels=image_preprocessor.num_channels,
+ output_image_shape=config.train_size,
+ rescale_factor=100.0,
+ # decoder kwargs
+ use_query_residual=False,
+ output_num_channels=2,
+ # We query the decoder using the first frame features
+ # rather than a standard decoder position encoding.
+ position_encoding_type="fourier",
+ fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_decoder,
+ ),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, PerceiverClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the optical flow loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import PerceiverForOpticalFlow
+ >>> import torch
+
+ >>> model = PerceiverForOpticalFlow.from_pretrained("deepmind/optical-flow-perceiver")
+
+ >>> # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,
+ >>> # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)
+ >>> # patches have shape (batch_size, num_frames, num_channels, height, width)
+ >>> # the authors train on resolutions of 368 x 496
+ >>> patches = torch.randn(1, 2, 27, 368, 496)
+ >>> outputs = model(inputs=patches)
+ >>> logits = outputs.logits
+ >>> list(logits.shape)
+ [1, 368, 496, 2]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.perceiver(
+ inputs=inputs,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ logits = outputs.logits if return_dict else outputs[0]
+
+ loss = None
+ if labels is not None:
+ raise NotImplementedError("Optical flow training is not yet supported")
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return PerceiverClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
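+# A minimal illustrative sketch (an assumption for clarity, not part of the modeling code): how one
+# could build the (batch_size, num_frames=2, 27, height, width) patch input that
+# `PerceiverForOpticalFlow` above expects, by extracting a 3 x 3 neighborhood around every pixel of
+# each frame:
+#
+#     import torch
+#
+#     def extract_3x3_patches(frames):  # frames: (batch, 2, 3, height, width)
+#         b, t, c, h, w = frames.shape
+#         x = frames.reshape(b * t, c, h, w)
+#         # padding=1 keeps the spatial resolution; channels become 3 * 3 * 3 = 27
+#         patches = torch.nn.functional.unfold(x, kernel_size=3, padding=1)
+#         return patches.reshape(b, t, c * 9, h, w)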
+
+@add_start_docstrings(
+ """
+Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700.
+
+[`PerceiverForMultimodalAutoencoding`] uses [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor`] to
+preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to
+preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad
+each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies
+the Perceiver encoder.
+
+[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] is used to decode the latent representation of
+[`PerceiverModel`]. This decoder uses each modality-specific decoder to construct queries. The decoder queries are
+created based on the inputs after preprocessing. However, autoencoding an entire video in a single forward pass is
+computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent
+representation. This is determined by the subsampled indices for each modality, which can be provided as additional
+input to the forward pass of [`PerceiverForMultimodalAutoencoding`].
+
+[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] also pads the decoder queries of the different
+modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention
+is performed with the latent representation of [`PerceiverModel`].
+
+Finally, [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor`] is used to turn this tensor into an
+actual video. It first splits up the output into the different modalities, and then applies the respective
+postprocessor for each modality.
+
+Note that, by masking the classification label during evaluation (i.e. simply providing a tensor of zeros for the
+"label" modality), this auto-encoding model becomes a Kinetics 700 video classifier.
+""",
+ PERCEIVER_START_DOCSTRING,
+)
+class PerceiverForMultimodalAutoencoding(PerceiverPreTrainedModel):
+ def __init__(self, config: PerceiverConfig):
+ super().__init__(config)
+
+ n_audio_samples = config.num_frames * config.audio_samples_per_frame
+
+ input_preprocessor = PerceiverMultimodalPreprocessor(
+ min_padding_size=4,
+ modalities={
+ "audio": PerceiverAudioPreprocessor(
+ config,
+ position_encoding_type="fourier",
+ fourier_position_encoding_kwargs={
+ "num_bands": 192,
+ "max_resolution": (n_audio_samples,),
+ "sine_only": False,
+ "concat_pos": True,
+ },
+ prep_type="patches",
+ samples_per_patch=config.samples_per_patch,
+ ),
+ "image": PerceiverImagePreprocessor(
+ config,
+ position_encoding_type="fourier",
+ fourier_position_encoding_kwargs={
+ "num_bands": 32,
+ "max_resolution": (config.num_frames, config.image_size, config.image_size),
+ "sine_only": False,
+ "concat_pos": True,
+ },
+ prep_type="patches",
+ spatial_downsample=4,
+ temporal_downsample=1,
+ ),
+ "label": PerceiverOneHotPreprocessor(config),
+ },
+ mask_probs={"image": 0.0, "audio": 0.0, "label": 1.0},
+ )
+
+ image_decoder = PerceiverBasicVideoAutoencodingDecoder(
+ config,
+ # Autoencoding, don't pass inputs to the queries.
+ concat_preprocessed_input=False,
+ output_shape=config.output_shape,
+ output_num_channels=config.output_num_channels,
+ use_query_residual=False,
+ position_encoding_only=True,
+ position_encoding_type="fourier",
+ fourier_position_encoding_kwargs={
+ "num_bands": 32,
+ "max_resolution": (config.num_frames, config.image_size, config.image_size),
+ "sine_only": False,
+ "concat_pos": True,
+ },
+ )
+
+ decoder = PerceiverMultimodalDecoder(
+ config,
+ # Autoencoding, don't pass inputs to the queries.
+ concat_preprocessed_input=False,
+ # Modality specific decoders are used ONLY to generate queries.
+            # All modalities are decoded together using a unified decoder.
+ modalities={
+ "audio": PerceiverBasicDecoder(
+ config,
+ # Autoencoding, don't pass inputs to the queries.
+ concat_preprocessed_input=False,
+ output_index_dims=(n_audio_samples // config.samples_per_patch,),
+ output_num_channels=config.output_num_channels,
+ use_query_residual=False,
+ position_encoding_only=True,
+ position_encoding_type="fourier",
+ fourier_position_encoding_kwargs={
+ "num_bands": 192,
+ "max_resolution": (n_audio_samples,),
+ "sine_only": False,
+ "concat_pos": True,
+ },
+ ),
+ "image": image_decoder,
+ "label": PerceiverClassificationDecoder(
+ config,
+ # Autoencoding, don't pass inputs to the queries.
+ concat_preprocessed_input=False,
+ use_query_residual=False,
+ position_encoding_only=True,
+ position_encoding_type="trainable",
+ trainable_position_encoding_kwargs={
+ "num_channels": config._label_trainable_num_channels,
+ "index_dims": 1,
+ },
+ ),
+ },
+ num_outputs=None,
+ output_num_channels=config.output_num_channels,
+ use_query_residual=False,
+ )
+
+ output_postprocessor = PerceiverMultimodalPostprocessor(
+ modalities={
+ "audio": PerceiverAudioPostprocessor(config, in_channels=config.output_num_channels),
+ "image": PerceiverProjectionPostprocessor(in_channels=config.output_num_channels, out_channels=3),
+ "label": PerceiverClassificationPostprocessor(config, in_channels=config.output_num_channels),
+ }
+ )
+
+ self.perceiver = PerceiverModel(
+ config,
+ input_preprocessor=input_preprocessor,
+ decoder=decoder,
+ output_postprocessor=output_postprocessor,
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PERCEIVER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=PerceiverClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ subsampled_output_points: Optional[Dict[str, torch.Tensor]] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.Tensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, PerceiverClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import PerceiverForMultimodalAutoencoding
+ >>> import torch
+ >>> import numpy as np
+
+ >>> # create multimodal inputs
+ >>> images = torch.randn((1, 16, 3, 224, 224))
+ >>> audio = torch.randn((1, 30720, 1))
+ >>> inputs = dict(image=images, audio=audio, label=torch.zeros((images.shape[0], 700)))
+
+ >>> model = PerceiverForMultimodalAutoencoding.from_pretrained("deepmind/multimodal-perceiver")
+
+ >>> # in the Perceiver IO paper, videos are auto-encoded in chunks
+ >>> # each chunk subsamples different index dimensions of the image and audio modality decoder queries
+ >>> nchunks = 128
+ >>> image_chunk_size = np.prod((16, 224, 224)) // nchunks
+ >>> audio_chunk_size = audio.shape[1] // model.config.samples_per_patch // nchunks
+ >>> # process the first chunk
+ >>> chunk_idx = 0
+ >>> subsampling = {
+ ... "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)),
+ ... "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)),
+ ... "label": None,
+ ... }
+
+ >>> outputs = model(inputs=inputs, subsampled_output_points=subsampling)
+ >>> logits = outputs.logits
+ >>> list(logits["audio"].shape)
+ [1, 240]
+
+ >>> list(logits["image"].shape)
+ [1, 6272, 3]
+
+ >>> list(logits["label"].shape)
+ [1, 700]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.perceiver(
+ inputs=inputs,
+ attention_mask=attention_mask,
+ subsampled_output_points=subsampled_output_points,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ logits = outputs.logits if return_dict else outputs[0]
+
+ loss = None
+ if labels is not None:
+ raise NotImplementedError("Multimodal autoencoding training is not yet supported")
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return PerceiverClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+# Below: position encodings
+
+
+def build_position_encoding(
+ position_encoding_type,
+ out_channels=None,
+ project_pos_dim=-1,
+ trainable_position_encoding_kwargs=None,
+ fourier_position_encoding_kwargs=None,
+):
+ """
+ Builds the position encoding.
+
+    Args:
+        out_channels (`int`, *optional*):
+            The number of channels of the position encodings.
+        project_pos_dim (`int`, *optional*, defaults to -1):
+            If > 0, project the position encodings to this dimension.
+    """
+
+ if position_encoding_type == "trainable":
+ if not trainable_position_encoding_kwargs:
+ raise ValueError("Make sure to pass trainable_position_encoding_kwargs")
+ output_pos_enc = PerceiverTrainablePositionEncoding(**trainable_position_encoding_kwargs)
+ elif position_encoding_type == "fourier":
+ # We don't use the index_dims argument, as this is only known during the forward pass
+ if not fourier_position_encoding_kwargs:
+ raise ValueError("Make sure to pass fourier_position_encoding_kwargs")
+ output_pos_enc = PerceiverFourierPositionEncoding(**fourier_position_encoding_kwargs)
+ else:
+ raise ValueError(f"Unknown position encoding type: {position_encoding_type}.")
+
+ # Optionally, project the position encoding to a target dimension:
+ positions_projection = nn.Linear(out_channels, project_pos_dim) if project_pos_dim > 0 else nn.Identity()
+
+ return output_pos_enc, positions_projection
+
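+
+# A minimal usage sketch of `build_position_encoding` (illustrative only; the helper name
+# `_example_build_position_encoding` and the chosen sizes are arbitrary, not part of the library).
+# It builds a trainable encoding over a 16x16 index grid and projects it to 64 channels.
+def _example_build_position_encoding():
+    pos_enc, pos_proj = build_position_encoding(
+        position_encoding_type="trainable",
+        out_channels=128,
+        project_pos_dim=64,
+        trainable_position_encoding_kwargs=dict(index_dims=(16, 16), num_channels=128),
+    )
+    return pos_proj(pos_enc(batch_size=2)).shape  # torch.Size([2, 256, 64])
+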
+
+# Below: Perceiver decoders
+
+
+class PerceiverAbstractDecoder(nn.Module, metaclass=abc.ABCMeta):
+ """Perceiver abstract decoder."""
+
+ @abc.abstractmethod
+ def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
+ raise NotImplementedError
+
+ @property
+ @abc.abstractmethod
+ def num_query_channels(self):
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def forward(self, query, z, query_mask=None):
+ raise NotImplementedError
+
+
+class PerceiverProjectionDecoder(PerceiverAbstractDecoder):
+ """
+ Baseline projection decoder (no cross-attention).
+
+ Args:
+ config ([`PerceiverConfig`]):
+ Model configuration.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.classifier = nn.Linear(config.d_latents, config.num_labels)
+
+ def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
+ return None
+
+ def forward(
+ self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None
+ ) -> torch.FloatTensor:
+ # (batch_size, num_latents, d_latents) -> (batch_size, d_latents)
+ z = torch.mean(z, dim=1)
+ # (batch_size, d_latents) -> (batch_size, config.num_labels)
+ logits = self.classifier(z)
+ return logits
+
+
+class PerceiverBasicDecoder(PerceiverAbstractDecoder):
+ """
+ Cross-attention-based decoder. This class can be used to decode the final hidden states of the latents using a
+ cross-attention operation, in which the latents produce keys and values.
+
+ The shape of the output of this class depends on how one defines the output queries (also called decoder queries).
+
+ Args:
+        config ([`PerceiverConfig`]):
+ Model configuration.
+ output_num_channels (`int`, *optional*):
+ The number of channels in the output. Will only be used in case *final_project* is set to `True`.
+ position_encoding_type (`str`, *optional*, defaults to "trainable"):
+ The type of position encoding to use. Can be either "trainable", "fourier", or "none".
+ output_index_dims (`int`, *optional*):
+ The number of dimensions of the output queries. Ignored if 'position_encoding_type' == 'none'.
+ num_channels (`int`, *optional*, defaults to 128):
+ The number of channels of the decoder queries. Ignored if 'position_encoding_type' == 'none'.
+ qk_channels (`int`, *optional*):
+ The number of channels of the queries and keys in the cross-attention layer.
+ v_channels (`int`, *optional*):
+ The number of channels of the values in the cross-attention layer.
+ num_heads (`int`, *optional*, defaults to 1):
+ The number of attention heads in the cross-attention layer.
+ widening_factor (`int`, *optional*, defaults to 1):
+ The widening factor of the cross-attention layer.
+ use_query_residual (`bool`, *optional*, defaults to `False`):
+ Whether to use a residual connection between the query and the output of the cross-attention layer.
+ concat_preprocessed_input (`bool`, *optional*, defaults to `False`):
+ Whether to concatenate the preprocessed input to the query.
+ final_project (`bool`, *optional*, defaults to `True`):
+ Whether to project the output of the cross-attention layer to a target dimension.
+ position_encoding_only (`bool`, *optional*, defaults to `False`):
+ Whether to only use this class to define output queries.
+ """
+
+ def __init__(
+ self,
+ config: PerceiverConfig,
+ output_num_channels: int,
+ position_encoding_type: Optional[str] = "trainable",
+ # The following 2 arguments are ignored if position_encoding_type == 'none':
+ output_index_dims: Optional[int] = None,
+ num_channels: Optional[int] = 128,
+ subsampled_index_dims: Optional[int] = None,
+ qk_channels: Optional[int] = None,
+ v_channels: Optional[int] = None,
+ num_heads: Optional[int] = 1,
+ widening_factor: Optional[int] = 1,
+ use_query_residual: Optional[bool] = False,
+ concat_preprocessed_input: Optional[bool] = False,
+ final_project: Optional[bool] = True,
+ position_encoding_only: Optional[bool] = False,
+ **position_encoding_kwargs,
+ ) -> None:
+ super().__init__()
+
+ self.output_num_channels = output_num_channels
+ # If `none`, the decoder will not construct any position encodings.
+ # You should construct your own when querying the decoder.
+ self.output_position_encodings = None
+ self.position_encoding_type = position_encoding_type
+ self.position_encoding_kwargs = position_encoding_kwargs
+ if position_encoding_type != "none":
+ self.output_position_encodings, self.positions_projection = build_position_encoding(
+ position_encoding_type=position_encoding_type, **position_encoding_kwargs
+ )
+
+ self.output_index_dims = output_index_dims
+ self.num_channels = num_channels
+ if subsampled_index_dims is None:
+ subsampled_index_dims = output_index_dims
+ self.subsampled_index_dims = subsampled_index_dims
+ self.concat_preprocessed_input = concat_preprocessed_input
+ self.final_project = final_project
+ self.position_encoding_only = position_encoding_only
+
+ # for multimodal autoencoding, we don't need the decoder cross-attention and final layer
+ # so then we will set position_encoding_only to True
+ if not self.position_encoding_only:
+ self.decoding_cross_attention = PerceiverLayer(
+ config,
+ is_cross_attention=True,
+ qk_channels=qk_channels,
+ v_channels=v_channels,
+ num_heads=num_heads,
+ q_dim=num_channels,
+ kv_dim=config.d_latents,
+ widening_factor=widening_factor,
+ use_query_residual=use_query_residual,
+ )
+ self.final_layer = nn.Linear(num_channels, output_num_channels) if final_project else nn.Identity()
+
+ @property
+ def num_query_channels(self) -> int:
+ if self.position_encoding_type == "none": # Queries come from elsewhere
+ raise ValueError(
+ "You cannot calculate number of decoder query channels when position_encoding_type is set to none"
+ )
+ if self.position_encoding_only:
+ if "project_pos_dim" in self.position_encoding_kwargs:
+ return self.position_encoding_kwargs["project_pos_dim"]
+ return self.output_position_encodings.output_size()
+ if self.final_project:
+ return self.output_num_channels
+ return self.num_channels
+
+ def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
+ if self.position_encoding_type == "none": # Queries come from elsewhere
+ raise ValueError("You cannot construct decoder queries when position_encoding_type is set to none")
+ if subsampled_points is not None:
+            # subsampled_points are flat indices into the flattened output array;
+            # since the outputs aren't actually flattened, we use unravel_index
+            # to recover the per-dimension indices of the unflattened array.
+            # unravel_index returns a tuple (x_idx, y_idx, ...);
+            # stack them to get the [n, d] tensor of coordinates
+ indices = [torch.from_numpy(x) for x in np.unravel_index(subsampled_points.cpu(), self.output_index_dims)]
+ pos = torch.stack(indices, dim=1)
+ batch_size = inputs.shape[0]
+ # Map these coordinates to [-1, 1]
+ pos = -1 + 2 * pos / torch.tensor(self.output_index_dims)[None, :]
+ pos = torch.broadcast_to(pos[None], [batch_size, pos.shape[0], pos.shape[1]])
+ # Construct the position encoding.
+ if self.position_encoding_type == "trainable":
+ pos_emb = self.output_position_encodings(batch_size)
+ elif self.position_encoding_type == "fourier":
+ pos_emb = self.output_position_encodings(
+ self.output_index_dims, batch_size=batch_size, device=inputs.device, dtype=inputs.dtype, pos=pos
+ )
+
+ # Optionally project them to a target dimension.
+ pos_emb = self.positions_projection(pos_emb)
+ pos_emb = torch.reshape(pos_emb, [pos_emb.shape[0], -1, pos_emb.shape[-1]])
+ else:
+ batch_size = inputs.shape[0]
+ index_dims = inputs.shape[2:]
+
+ # Construct the position encoding.
+ if self.position_encoding_type == "trainable":
+ pos_emb = self.output_position_encodings(batch_size)
+ elif self.position_encoding_type == "fourier":
+ pos_emb = self.output_position_encodings(
+ index_dims, batch_size, device=inputs.device, dtype=inputs.dtype
+ )
+
+ # Optionally project them to a target dimension.
+ pos_emb = self.positions_projection(pos_emb)
+
+ if self.concat_preprocessed_input:
+ if inputs_without_pos is None:
+ raise ValueError("Value is required for inputs_without_pos if concat_preprocessed_input is True")
+ pos_emb = torch.cat([inputs_without_pos, pos_emb], dim=-1)
+
+ return pos_emb
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ z: torch.FloatTensor,
+ query_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> PerceiverDecoderOutput:
+ # Cross-attention decoding.
+ # key, value: B x N x K; query: B x M x K
+        # Attention maps -> B x M x N
+ # Output -> B x M x K
+ cross_attentions = () if output_attentions else None
+
+ layer_outputs = self.decoding_cross_attention(
+ query,
+ attention_mask=query_mask,
+ head_mask=None,
+ inputs=z,
+ inputs_mask=None,
+ output_attentions=output_attentions,
+ )
+ output = layer_outputs[0]
+
+ if output_attentions:
+ cross_attentions = cross_attentions + (layer_outputs[1],)
+
+ logits = self.final_layer(output)
+
+ return PerceiverDecoderOutput(logits=logits, cross_attentions=cross_attentions)
+
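+
+# Illustrative sketch (not part of the library) of the coordinate handling done by
+# `PerceiverBasicDecoder.decoder_query` when `subsampled_points` is passed: flat indices are
+# unraveled into per-dimension coordinates and rescaled to (roughly) the [-1, 1] range.
+def _example_unravel_and_rescale():
+    output_index_dims = (16, 224, 224)  # e.g. frames x height x width
+    subsampled_points = torch.arange(0, 4)  # the first four flat positions
+    indices = [torch.from_numpy(x) for x in np.unravel_index(subsampled_points.numpy(), output_index_dims)]
+    pos = torch.stack(indices, dim=1)  # shape (4, 3): one (t, h, w) triple per point
+    pos = -1 + 2 * pos / torch.tensor(output_index_dims)[None, :]
+    return pos  # each coordinate now lies in [-1, 1)
+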
+
+class PerceiverClassificationDecoder(PerceiverAbstractDecoder):
+ """
+    Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output.
+    Turns the output of the Perceiver encoder, of shape (batch_size, num_latents, d_latents), into a tensor of shape
+    (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels).
+
+ Args:
+ config ([`PerceiverConfig`]):
+ Model configuration.
+ """
+
+ def __init__(self, config, **decoder_kwargs):
+ super().__init__()
+
+ self.num_labels = config.num_labels
+ self.decoder = PerceiverBasicDecoder(
+ config,
+ output_num_channels=self.num_labels,
+ output_index_dims=1, # Predict a single logit array.
+ **decoder_kwargs,
+ )
+
+ @property
+ def num_query_channels(self) -> int:
+ return self.decoder.num_query_channels
+
+ def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
+ return self.decoder.decoder_query(
+ inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points
+ )
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ z: torch.FloatTensor,
+ query_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> PerceiverDecoderOutput:
+ decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
+
+ # B x 1 x num_classes -> B x num_classes
+ logits = decoder_outputs.logits[:, 0, :]
+
+ return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
+
+
+class PerceiverOpticalFlowDecoder(PerceiverAbstractDecoder):
+ """Cross-attention based optical flow decoder."""
+
+ def __init__(self, config, output_image_shape, output_num_channels=2, rescale_factor=100.0, **decoder_kwargs):
+ super().__init__()
+
+ self.output_image_shape = output_image_shape
+ self.output_num_channels = output_num_channels
+ self.rescale_factor = rescale_factor
+ self.decoder = PerceiverBasicDecoder(config, output_num_channels=output_num_channels, **decoder_kwargs)
+
+ @property
+ def num_query_channels(self) -> int:
+ return self.decoder.num_query_channels
+
+ def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
+ if subsampled_points is not None:
+ raise ValueError("FlowDecoder doesn't support subsampling yet.")
+ return inputs
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ z: torch.FloatTensor,
+ query_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> PerceiverDecoderOutput:
+ decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
+ preds = decoder_outputs.logits
+ # Output flow and rescale.
+ preds /= self.rescale_factor
+ preds = preds.reshape([preds.shape[0]] + list(self.output_image_shape) + [preds.shape[-1]])
+ return PerceiverDecoderOutput(logits=preds, cross_attentions=decoder_outputs.cross_attentions)
+
+
+class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder):
+ """
+    Cross-attention based video-autoencoding decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] with video
+ reshaping logic.
+
+ Args:
+        config ([`PerceiverConfig`]):
+ Model configuration.
+ output_shape (`List[int]`):
+ Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension.
+ position_encoding_type (`str`):
+ The type of position encoding to use. Can be either "trainable", "fourier", or "none".
+ """
+
+ def __init__(
+ self, config: PerceiverConfig, output_shape: List[int], position_encoding_type: str, **decoder_kwargs
+ ) -> None:
+ super().__init__()
+ if len(output_shape) != 4: # B, T, H, W
+ raise ValueError(f"Expected rank 4 output_shape, got {output_shape}.")
+ # Build the decoder components:
+ self.output_shape = output_shape
+ self.output_num_channels = decoder_kwargs["output_num_channels"]
+
+ self.decoder = PerceiverBasicDecoder(
+ config,
+            output_index_dims=self.output_shape[1:4],  # (T, H, W)
+ position_encoding_type=position_encoding_type,
+ **decoder_kwargs,
+ )
+
+ @property
+ def num_query_channels(self) -> int:
+ return self.decoder.num_query_channels
+
+ def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
+ return self.decoder.decoder_query(
+ inputs,
+ modality_sizes=modality_sizes,
+ inputs_without_pos=inputs_without_pos,
+ subsampled_points=subsampled_points,
+ )
+
+ def forward(
+ self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None
+ ) -> PerceiverDecoderOutput:
+ decoder_outputs = self.decoder(query, z)
+ logits = decoder_outputs.logits
+
+ logits = torch.reshape(logits, self.output_shape + [logits.shape[-1]])
+ return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
+
+
+def restructure(modality_sizes: ModalitySizeType, inputs: torch.Tensor) -> Mapping[str, torch.Tensor]:
+    """
+    Partitions a [B, N, C] tensor into per-modality tensors.
+
+    Args:
+        modality_sizes (`Dict[str, int]`):
+            Dict specifying the number of positions (along the second dimension) occupied by each modality.
+        inputs (`torch.Tensor`):
+            Input tensor of shape [B, N, C].
+
+    Returns:
+        `Dict[str, torch.Tensor]`: Dict mapping each modality name to its associated tensor.
+    """
+ outputs = {}
+ index = 0
+ # Apply a predictable ordering to the modalities
+ for modality in sorted(modality_sizes.keys()):
+ size = modality_sizes[modality]
+ inp = inputs[:, index : index + size]
+ index += size
+ outputs[modality] = inp
+ return outputs
+
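+
+# Illustrative sketch (not part of the library): `restructure` splits a concatenated
+# (batch, N, channels) tensor back into per-modality tensors, iterating modalities in sorted order.
+def _example_restructure():
+    inputs = torch.randn(2, 10, 8)  # batch of 2, 10 positions, 8 channels
+    modality_sizes = {"image": 6, "audio": 3, "label": 1}
+    outputs = restructure(modality_sizes, inputs)
+    # "audio" sorts first and takes positions 0:3, then "image" takes 3:9 and "label" takes 9:10,
+    # giving shapes (2, 3, 8), (2, 6, 8) and (2, 1, 8) respectively
+    return {name: tensor.shape for name, tensor in outputs.items()}
+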
+
+class PerceiverMultimodalDecoder(PerceiverAbstractDecoder):
+ """
+ Multimodal decoding by composing uni-modal decoders. The *modalities* argument of the constructor is a dictionary
+ mapping modality name to the decoder of that modality. That decoder will be used to construct queries for that
+ modality. Modality-specific queries are padded with trainable modality-specific parameters, after which they are
+ concatenated along the time dimension.
+
+ Next, there is a shared cross attention operation across all modalities.
+
+ Args:
+        config ([`PerceiverConfig`]):
+ Model configuration.
+ modalities (`Dict[str, PerceiverAbstractDecoder]`):
+ Dictionary mapping modality name to the decoder of that modality.
+ num_outputs (`int`):
+ The number of outputs of the decoder.
+ output_num_channels (`int`):
+ The number of channels in the output.
+ min_padding_size (`int`, *optional*, defaults to 2):
+ The minimum padding size for all modalities. The final output will have num_channels equal to the maximum
+ channels across all modalities plus min_padding_size.
+ subsampled_index_dims (`Dict[str, PerceiverAbstractDecoder]`, *optional*):
+ Dictionary mapping modality name to the subsampled index dimensions to use for the decoder query of that
+ modality.
+ """
+
+ def __init__(
+ self,
+ config: PerceiverConfig,
+ modalities: Dict[str, PerceiverAbstractDecoder],
+ num_outputs: int,
+ output_num_channels: int,
+ min_padding_size: Optional[int] = 2,
+ subsampled_index_dims: Optional[Dict[str, PerceiverAbstractDecoder]] = None,
+ **decoder_kwargs,
+ ) -> None:
+ super().__init__()
+ self.modalities = nn.ModuleDict(modalities)
+ self.subsampled_index_dims = subsampled_index_dims
+ self.min_padding_size = min_padding_size
+ self.output_num_channels = output_num_channels
+ self.num_outputs = num_outputs
+ self.decoder = PerceiverBasicDecoder(
+ config,
+ output_index_dims=(num_outputs,),
+ output_num_channels=output_num_channels,
+ position_encoding_type="none",
+ num_channels=self.num_query_channels,
+ **decoder_kwargs,
+ )
+ self.padding = nn.ParameterDict(
+ {
+ modality: nn.Parameter(torch.randn(1, self.num_query_channels - decoder.num_query_channels))
+ for modality, decoder in modalities.items()
+ }
+ )
+
+ @property
+ def num_query_channels(self) -> int:
+ max_channel_size = max(decoder.num_query_channels for _, decoder in self.modalities.items())
+ common_channel_size = max_channel_size + self.min_padding_size
+ return common_channel_size
+
+ def decoder_query(self, inputs, modality_sizes, inputs_without_pos=None, subsampled_points=None):
+ # Partition the flat inputs among the different modalities
+ inputs = restructure(modality_sizes, inputs)
+
+ # Obtain modality-specific decoders' queries
+ subsampled_points = subsampled_points or {}
+
+ decoder_queries = {}
+ for modality, decoder in self.modalities.items():
+ # Get input_without_pos for this modality if it exists.
+ input_without_pos = None
+ if inputs_without_pos is not None:
+ input_without_pos = inputs_without_pos.get(modality, None)
+ query = decoder.decoder_query(
+ inputs=inputs[modality],
+ modality_sizes=None,
+ inputs_without_pos=input_without_pos,
+ subsampled_points=subsampled_points.get(modality, None),
+ )
+ decoder_queries[modality] = query
+
+ # Pad all queries with trainable position encodings to make them have the same channels
+
+ def embed(modality, x):
+ x = torch.reshape(x, [x.shape[0], np.prod(x.shape[1:-1]), x.shape[-1]])
+ pos = self.padding[modality]
+ pos = torch.broadcast_to(pos, [x.shape[0], x.shape[1], self.num_query_channels - x.shape[2]])
+ return torch.cat([x, pos], dim=2)
+
+ # Apply a predictable ordering to the modalities
+ return torch.cat(
+ [embed(modality, decoder_queries[modality]) for modality in sorted(self.modalities.keys())], dim=1
+ )
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ z: torch.FloatTensor,
+ query_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> torch.Tensor:
+        # Cross-attention decoding with the concatenated multimodal queries.
+ decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
+
+ return decoder_outputs
+
+
+# Below: IO pre- and post-processor classes for Perceiver.
+def space_to_depth(frames: torch.Tensor, temporal_block_size: int = 1, spatial_block_size: int = 1) -> torch.Tensor:
+ """
+ Space to depth transform. Rearranges blocks of spatial data, into depth.
+
+ This function assumes the channels to be first, but will place the channels last after transformation.
+
+ Based on https://discuss.pytorch.org/t/is-there-any-layer-like-tensorflows-space-to-depth-function/3487/15.
+ """
+ if len(frames.shape) == 4:
+ batch_size, num_channels, height, width = frames.shape
+ # split up dimensions (height by spatial_block_size, width by spatial_block_size)
+ frames = frames.view(
+ batch_size,
+ num_channels,
+ height // spatial_block_size,
+ spatial_block_size,
+ width // spatial_block_size,
+ spatial_block_size,
+ )
+ # move blocks to last dimension: (batch_size, H//bs, W//bs, bs, bs, C)
+ frames = frames.permute(0, 2, 4, 3, 5, 1).contiguous()
+ # concatenate blocks along channel dimension: (batch_size, H//bs, W//bs, bs*bs*C)
+ frames = frames.view(
+ batch_size,
+ height // spatial_block_size,
+ width // spatial_block_size,
+ (spatial_block_size**2) * num_channels,
+ )
+ return frames
+ elif len(frames.shape) == 5:
+ batch_size, time, num_channels, height, width = frames.shape
+ # split up dimensions (time by temporal_block_size, height by spatial_block_size, width by spatial_block_size)
+ frames = frames.view(
+ batch_size,
+ time // temporal_block_size,
+ temporal_block_size,
+ num_channels,
+ height // spatial_block_size,
+ spatial_block_size,
+ width // spatial_block_size,
+ spatial_block_size,
+ )
+ # move blocks to last dimension: (batch_size, T//ts, H//bs, W//bs, ts, bs, bs, C)
+ frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
+ # concatenate blocks along channel dimension: (batch_size, T//ts, H//bs, W//bs, ts*bs*bs*C)
+ frames = frames.view(
+ batch_size,
+ time // temporal_block_size,
+ height // spatial_block_size,
+ width // spatial_block_size,
+ temporal_block_size * (spatial_block_size**2) * num_channels,
+ )
+ return frames
+ else:
+ raise ValueError(
+ "Frames should be of rank 4 (batch, channels, height, width)"
+ " or rank 5 (batch, time, channels, height, width)"
+ )
+
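+
+# Illustrative sketch (not part of the library): `space_to_depth` moves spatial (and optionally
+# temporal) blocks into the channel dimension and switches to channels-last layout.
+def _example_space_to_depth():
+    frames = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)
+    patched = space_to_depth(frames, spatial_block_size=2)
+    return patched.shape  # torch.Size([1, 112, 112, 12]), since 2 * 2 * 3 = 12 channels per patch
+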
+
+class Conv2dSamePadding(nn.Conv2d):
+ """
+ Conv2d layer with padding="same" support. Source:
+ https://gist.github.com/sumanmichael/4de9dee93f972d47c80c4ade8e149ea6
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(Conv2dSamePadding, self).__init__(*args, **kwargs)
+ self.zero_pad_2d = nn.ZeroPad2d(
+ reduce(__add__, [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]])
+ )
+
+ def forward(self, input):
+ return self._conv_forward(self.zero_pad_2d(input), self.weight, self.bias)
+
+
+class Conv2DDownsample(nn.Module):
+ """Downsamples 4x by applying a 2D convolution and doing max pooling."""
+
+ def __init__(
+ self,
+ num_layers: int = 1,
+ in_channels: int = 3,
+ out_channels: int = 64,
+ use_batchnorm: bool = True,
+ ):
+ """
+ Constructs a Conv2DDownsample model.
+
+ Args:
+ in_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ out_channels (`int`, *optional*, defaults to 64):
+ The number of conv output channels.
+ use_batchnorm (`bool`, *optional*, defaults to `True`):
+ Whether to use batchnorm.
+ """
+ super().__init__()
+
+ self.conv = Conv2dSamePadding(
+ in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False
+ )
+ self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity()
+ self.relu = nn.ReLU()
+ self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
+
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ out = self.conv(inputs)
+ out = self.batchnorm(out)
+ out = self.relu(out)
+ out = self.max_pool(out)
+ return out
+
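+
+# Illustrative sketch (not part of the library): `Conv2DDownsample` reduces the spatial resolution
+# roughly 4x (stride-2 "same"-padded convolution followed by an unpadded stride-2 max pool).
+def _example_conv2d_downsample():
+    module = Conv2DDownsample(in_channels=3, out_channels=64)
+    out = module(torch.randn(1, 3, 224, 224))
+    # 224 -> 112 after the padded conv, then 112 -> 55 after the unpadded 3x3 max pool
+    return out.shape  # torch.Size([1, 64, 55, 55])
+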
+
+def generate_fourier_features(pos, num_bands, max_resolution=(224, 224), concat_pos=True, sine_only=False):
+ """
+ Generate a Fourier frequency position encoding with linear spacing.
+
+ Args:
+ pos (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`):
+ The Tensor containing the position of n points in d dimensional space.
+ num_bands (`int`):
+ The number of frequency bands (K) to use.
+ max_resolution (`Tuple[int]`, *optional*, defaults to (224, 224)):
+            The maximum resolution (i.e. the number of pixels per dimension), given as a tuple with one entry per dimension.
+ concat_pos (`bool`, *optional*, defaults to `True`):
+ Whether to concatenate the input position encoding to the Fourier features.
+ sine_only (`bool`, *optional*, defaults to `False`):
+ Whether to use a single phase (sin) or two (sin/cos) for each frequency band.
+
+ Returns:
+ `torch.FloatTensor` of shape `(batch_size, sequence_length, n_channels)`: The Fourier position embeddings. If
+ `concat_pos` is `True` and `sine_only` is `False`, output dimensions are ordered as: [dim_1, dim_2, ..., dim_d,
+ sin(pi*f_1*dim_1), ..., sin(pi*f_K*dim_1), ..., sin(pi*f_1*dim_d), ..., sin(pi*f_K*dim_d), cos(pi*f_1*dim_1),
+ ..., cos(pi*f_K*dim_1), ..., cos(pi*f_1*dim_d), ..., cos(pi*f_K*dim_d)], where dim_i is pos[:, i] and f_k is the
+ kth frequency band.
+ """
+
+ batch_size = pos.shape[0]
+
+ min_freq = 1.0
+ # Nyquist frequency at the target resolution:
+ freq_bands = torch.stack(
+ [torch.linspace(start=min_freq, end=res / 2, steps=num_bands) for res in max_resolution], dim=0
+ )
+
+ # Get frequency bands for each spatial dimension.
+ # Output is size [n, d * num_bands]
+ per_pos_features = pos[0, :, :][:, :, None] * freq_bands[None, :, :]
+ per_pos_features = torch.reshape(per_pos_features, [-1, np.prod(per_pos_features.shape[1:])])
+
+ if sine_only:
+ # Output is size [n, d * num_bands]
+ per_pos_features = torch.sin(np.pi * (per_pos_features))
+ else:
+ # Output is size [n, 2 * d * num_bands]
+ per_pos_features = torch.cat(
+ [torch.sin(np.pi * per_pos_features), torch.cos(np.pi * per_pos_features)], dim=-1
+ )
+ # Concatenate the raw input positions.
+ if concat_pos:
+ # Adds d bands to the encoding.
+ per_pos_features = torch.cat([pos, per_pos_features.expand(batch_size, -1, -1)], dim=-1)
+ return per_pos_features
+
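+
+# Illustrative sketch (not part of the library): for d index dimensions and K frequency bands,
+# `generate_fourier_features` returns d + 2 * K * d channels when `concat_pos=True` and
+# `sine_only=False` (the raw coordinates, then sines and cosines per band and dimension).
+def _example_fourier_channels():
+    # two points of a 2D index space, already normalized to [-1, 1]
+    pos = torch.tensor([[[-1.0, -1.0], [1.0, 1.0]]])  # (batch=1, n=2, d=2)
+    features = generate_fourier_features(pos, num_bands=4, max_resolution=(8, 8))
+    return features.shape  # torch.Size([1, 2, 18]), since 2 + 2 * 4 * 2 = 18
+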
+
+def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):
+ """
+ Generate an array of position indices for an N-D input array.
+
+ Args:
+ index_dims (`List[int]`):
+ The shape of the index dimensions of the input array.
+ output_range (`Tuple[float]`, *optional*, defaults to `(-1.0, 1.0)`):
+ The min and max values taken by each input index dimension.
+
+ Returns:
+ `torch.FloatTensor` of shape `(index_dims[0], index_dims[1], .., index_dims[-1], N)`.
+ """
+
+ def _linspace(n_xels_per_dim):
+ return torch.linspace(start=output_range[0], end=output_range[1], steps=n_xels_per_dim, dtype=torch.float32)
+
+ dim_ranges = [_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims]
+ array_index_grid = meshgrid(*dim_ranges, indexing="ij")
+
+ return torch.stack(array_index_grid, dim=-1)
+
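+
+# Illustrative sketch (not part of the library): `build_linear_positions` produces an evenly spaced
+# coordinate grid with one coordinate per index dimension, spanning the requested output range.
+def _example_linear_positions():
+    grid = build_linear_positions((3, 3))  # shape (3, 3, 2)
+    return grid[0, 0], grid[-1, -1]  # (tensor([-1., -1.]), tensor([1., 1.]))
+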
+
+class PerceiverAbstractPositionEncoding(nn.Module, metaclass=abc.ABCMeta):
+ """Perceiver abstract position encoding."""
+
+ @property
+ @abc.abstractmethod
+ def num_dimensions(self) -> int:
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def output_size(self, *args, **kwargs) -> int:
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def forward(self, batch_size, pos):
+ raise NotImplementedError
+
+
+class PerceiverTrainablePositionEncoding(PerceiverAbstractPositionEncoding):
+ """Trainable position encoding."""
+
+ def __init__(self, index_dims, num_channels=128):
+ super().__init__()
+ self._num_channels = num_channels
+ self._index_dims = index_dims
+ index_dim = np.prod(index_dims)
+ self.position_embeddings = nn.Parameter(torch.randn(index_dim, num_channels))
+
+ @property
+ def num_dimensions(self) -> int:
+ if isinstance(self._index_dims, int):
+ return 1
+ return len(self._index_dims)
+
+ def output_size(self, *args, **kwargs) -> int:
+ return self._num_channels
+
+ def forward(self, batch_size: int) -> torch.Tensor:
+ position_embeddings = self.position_embeddings
+
+ if batch_size is not None:
+ position_embeddings = position_embeddings.expand(batch_size, -1, -1)
+ return position_embeddings
+
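+
+# Illustrative sketch (not part of the library): the trainable encoding is a learned
+# (prod(index_dims), num_channels) table that is broadcast over the batch dimension.
+def _example_trainable_position_encoding():
+    pos_enc = PerceiverTrainablePositionEncoding(index_dims=(4, 4), num_channels=32)
+    return pos_enc(batch_size=2).shape  # torch.Size([2, 16, 32])
+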
+
+def _check_or_build_spatial_positions(pos, index_dims, batch_size):
+ """
+ Checks or builds spatial position features (x, y, ...).
+
+ Args:
+ pos (`torch.FloatTensor`):
+ None, or an array of position features. If None, position features are built. Otherwise, their size is checked.
+ index_dims (`List[int]`):
+ An iterable giving the spatial/index size of the data to be featurized.
+ batch_size (`int`):
+ The batch size of the data to be featurized.
+
+ Returns:
+        `torch.FloatTensor` of shape `(batch_size, prod(index_dims), len(index_dims))`: an array of position features.
+ """
+ if pos is None:
+ pos = build_linear_positions(index_dims)
+ # equivalent to `torch.broadcast_to(pos[None], (batch_size,) + pos.shape)`
+ # but `torch.broadcast_to` cannot be converted to ONNX
+ pos = pos[None].expand((batch_size,) + pos.shape)
+ pos = torch.reshape(pos, [batch_size, np.prod(index_dims), -1])
+ else:
+ # Just a warning label: you probably don't want your spatial features to
+ # have a different spatial layout than your pos coordinate system.
+ # But feel free to override if you think it'll work!
+ if pos.shape[-1] != len(index_dims):
+ raise ValueError("Spatial features have the wrong number of dimensions.")
+ return pos
+
+
+class PerceiverFourierPositionEncoding(PerceiverAbstractPositionEncoding):
+ """Fourier (Sinusoidal) position encoding."""
+
+ def __init__(self, num_bands, max_resolution, concat_pos=True, sine_only=False):
+ super().__init__()
+ self.num_bands = num_bands
+ self.max_resolution = max_resolution
+ self.concat_pos = concat_pos
+ self.sine_only = sine_only
+
+ @property
+ def num_dimensions(self) -> int:
+ return len(self.max_resolution)
+
+ def output_size(self):
+        """Returns the size of the positional encoding's last dimension."""
+ num_dims = len(self.max_resolution)
+ encoding_size = self.num_bands * num_dims
+ if not self.sine_only:
+ encoding_size *= 2
+ if self.concat_pos:
+ encoding_size += self.num_dimensions
+
+ return encoding_size
+
+ def forward(
+ self,
+ index_dims: List[int],
+ batch_size: int,
+ device: torch.device,
+ dtype: torch.dtype,
+ pos: torch.FloatTensor = None,
+ ) -> torch.FloatTensor:
+ pos = _check_or_build_spatial_positions(pos, index_dims, batch_size)
+ fourier_pos_enc = generate_fourier_features(
+ pos,
+ num_bands=self.num_bands,
+ max_resolution=self.max_resolution,
+ concat_pos=self.concat_pos,
+ sine_only=self.sine_only,
+ ).to(device=device, dtype=dtype)
+ return fourier_pos_enc
+
+
+class AbstractPreprocessor(nn.Module):
+ @property
+ def num_channels(self) -> int:
+ """Returns size of preprocessor output."""
+ raise NotImplementedError()
+
+
+class PerceiverTextPreprocessor(AbstractPreprocessor):
+ """
+ Text preprocessing for Perceiver Encoder. Can be used to embed `inputs` and add positional encodings.
+
+ The dimensionality of the embeddings is determined by the `d_model` attribute of the configuration.
+
+ Args:
+ config ([`PerceiverConfig`]):
+ Model configuration.
+ """
+
+ def __init__(self, config: PerceiverConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.embeddings = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.d_model)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
+
+ @property
+ def num_channels(self) -> int:
+ return self.config.d_model
+
+ def forward(self, inputs: torch.LongTensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
+ embeddings_without_pos = self.embeddings(inputs)
+
+ seq_length = inputs.shape[1]
+ position_ids = torch.arange(0, seq_length, device=inputs.device)
+ embeddings = embeddings_without_pos + self.position_embeddings(position_ids)
+
+ return embeddings, None, embeddings_without_pos
+
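+
+# Illustrative sketch (not part of the library): byte ids are embedded and summed with absolute
+# position embeddings; the config values below are arbitrary and only chosen to keep shapes small.
+def _example_text_preprocessor():
+    config = PerceiverConfig(vocab_size=262, d_model=32, max_position_embeddings=128)
+    preprocessor = PerceiverTextPreprocessor(config)
+    input_ids = torch.randint(0, 262, (2, 16))
+    embeddings, _, embeddings_without_pos = preprocessor(input_ids)
+    return embeddings.shape  # torch.Size([2, 16, 32])
+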
+
+class PerceiverEmbeddingDecoder(nn.Module):
+ """
+ Module to decode embeddings (for masked language modeling).
+
+ Args:
+ config ([`PerceiverConfig`]):
+ Model configuration.
+ """
+
+ def __init__(self, config: PerceiverConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.vocab_size = config.vocab_size
+ self.bias = nn.Parameter(torch.zeros(self.vocab_size))
+
+ def forward(self, hidden_states: torch.Tensor, embedding_layer: torch.Tensor) -> torch.Tensor:
+ batch_size, seq_len, d_model = hidden_states.shape
+ # Flatten batch dim
+ output = torch.matmul(hidden_states.reshape([-1, d_model]), embedding_layer.weight.transpose(0, 1))
+ output = output + self.bias
+
+ return output.reshape([batch_size, seq_len, self.vocab_size])
+
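+
+# Illustrative sketch (not part of the library): the embedding decoder reuses the weights of a given
+# embedding layer (weight tying) to map final hidden states back to vocabulary logits.
+def _example_embedding_decoder():
+    config = PerceiverConfig(vocab_size=262, d_model=32)
+    decoder = PerceiverEmbeddingDecoder(config)
+    embedding_layer = nn.Embedding(config.vocab_size, config.d_model)
+    hidden_states = torch.randn(2, 16, config.d_model)
+    return decoder(hidden_states, embedding_layer).shape  # torch.Size([2, 16, 262])
+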
+
+class PerceiverMultimodalPostprocessor(nn.Module):
+ """
+ Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single
+ postprocessor.
+
+ Args:
+ modalities (`Mapping[str, PostprocessorType]`):
+ Dictionary mapping modality name to postprocessor class for that modality.
+ input_is_dict (`bool`, *optional*, defaults to `False`):
+ If True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If
+ False, input is a tensor which is sliced up during postprocessing by *modality_sizes*.
+ """
+
+ def __init__(self, modalities: Mapping[str, PostprocessorType], input_is_dict: bool = False):
+ super().__init__()
+ self.modalities = nn.ModuleDict(modalities)
+ self.input_is_dict = input_is_dict
+
+ def forward(
+ self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None
+ ) -> Mapping[str, torch.Tensor]:
+ if not self.input_is_dict:
+ # Slice up modalities by their sizes.
+ if modality_sizes is None:
+ raise ValueError("Modality sizes should be specified if input is not a dictionary.")
+ inputs = restructure(modality_sizes=modality_sizes, inputs=inputs)
+
+ outputs = {
+ modality: postprocessor(inputs[modality], pos=pos, modality_sizes=None)
+ for modality, postprocessor in self.modalities.items()
+ }
+ return outputs
+
+
+class PerceiverClassificationPostprocessor(nn.Module):
+ """
+ Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits.
+
+ Args:
+        config ([`PerceiverConfig`]):
+ Model configuration.
+ in_channels (`int`):
+ Number of channels in the input.
+ """
+
+ def __init__(self, config: PerceiverConfig, in_channels: int) -> None:
+ super().__init__()
+ self.classifier = nn.Linear(in_channels, config.num_labels)
+
+ def forward(self, inputs, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
+ logits = self.classifier(inputs)
+ return logits[:, 0, :]
+
+
+class PerceiverAudioPostprocessor(nn.Module):
+ """
+ Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features.
+
+ Args:
+        config ([`PerceiverConfig`]):
+ Model configuration.
+ in_channels (`int`):
+ Number of channels in the input.
+ postproc_type (`str`, *optional*, defaults to `"patches"`):
+ Postprocessor type to use. Currently, only "patches" is supported.
+ """
+
+ def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str = "patches") -> None:
+ super().__init__()
+
+ if postproc_type not in ("patches",): # to be supported: 'conv', 'patches', 'pixels'
+ raise ValueError("Invalid postproc_type!")
+
+ # Architecture parameters:
+ self.classifier = nn.Linear(in_channels, config.samples_per_patch)
+
+ def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
+ logits = self.classifier(inputs)
+ return torch.reshape(logits, [inputs.shape[0], -1])
+
+
+class PerceiverProjectionPostprocessor(nn.Module):
+ """
+ Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower
+ dimension.
+
+ Args:
+ in_channels (`int`):
+ Number of channels in the input.
+ out_channels (`int`):
+ Number of channels in the output.
+ """
+
+ def __init__(self, in_channels: int, out_channels: int) -> None:
+ super().__init__()
+ self.classifier = nn.Linear(in_channels, out_channels)
+
+ def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
+ logits = self.classifier(inputs)
+ return logits
+
+
+class PerceiverImagePreprocessor(AbstractPreprocessor):
+ """
+ Image preprocessing for Perceiver Encoder.
+
+    Note: the *out_channels* argument refers to the output channels of a convolutional layer, if *prep_type* is set to
+    "conv1x1" or "conv". If one adds absolute position embeddings, one must make sure the *num_channels* of the
+    position encoding kwargs is set equal to *out_channels*.
+
+ Args:
+        config ([`PerceiverConfig`]):
+ Model configuration.
+ prep_type (`str`, *optional*, defaults to `"conv"`):
+ Preprocessing type. Can be "conv1x1", "conv", "patches", "pixels".
+ spatial_downsample (`int`, *optional*, defaults to 4):
+ Spatial downsampling factor.
+ temporal_downsample (`int`, *optional*, defaults to 1):
+ Temporal downsampling factor (only relevant in case a time dimension is present).
+ position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
+ Position encoding type. Can be "fourier" or "trainable".
+ in_channels (`int`, *optional*, defaults to 3):
+ Number of channels in the input.
+ out_channels (`int`, *optional*, defaults to 64):
+ Number of channels in the output.
+ conv_after_patching (`bool`, *optional*, defaults to `False`):
+ Whether to apply a convolutional layer after patching.
+ conv_after_patching_in_channels (`int`, *optional*, defaults to 54):
+ Number of channels in the input of the convolutional layer after patching.
+ conv2d_use_batchnorm (`bool`, *optional*, defaults to `True`):
+ Whether to use batch normalization in the convolutional layer.
+ concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
+ How to concatenate the position encoding to the input. Can be "concat" or "add".
+ project_pos_dim (`int`, *optional*, defaults to -1):
+ Dimension of the position encoding to project to. If -1, no projection is applied.
+ **position_encoding_kwargs (`Dict`, *optional*):
+ Keyword arguments for the position encoding.
+ """
+
+ def __init__(
+ self,
+ config,
+ prep_type="conv",
+ spatial_downsample: int = 4,
+ temporal_downsample: int = 1,
+ position_encoding_type: str = "fourier",
+ in_channels: int = 3,
+ out_channels: int = 64,
+ conv_after_patching: bool = False,
+ conv_after_patching_in_channels: int = 54, # only relevant when conv_after_patching = True
+ conv2d_use_batchnorm: bool = True,
+ concat_or_add_pos: str = "concat",
+ project_pos_dim: int = -1,
+ **position_encoding_kwargs,
+ ):
+ super().__init__()
+ self.config = config
+
+ if prep_type not in ("conv", "patches", "pixels", "conv1x1"):
+ raise ValueError(f"Prep_type {prep_type} is invalid")
+
+ if concat_or_add_pos not in ["concat", "add"]:
+ raise ValueError(f"Invalid value {concat_or_add_pos} for concat_or_add_pos.")
+
+ self.in_channels = in_channels
+ self.prep_type = prep_type
+ self.spatial_downsample = spatial_downsample
+ self.temporal_downsample = temporal_downsample
+ self.position_encoding_type = position_encoding_type
+ self.concat_or_add_pos = concat_or_add_pos
+ self.conv_after_patching = conv_after_patching
+ self.out_channels = out_channels
+
+ if self.prep_type == "conv":
+ # Downsampling with conv is currently restricted
+ convnet_num_layers = math.log(spatial_downsample, 4)
+ convnet_num_layers_is_int = convnet_num_layers == np.round(convnet_num_layers)
+ if not convnet_num_layers_is_int or temporal_downsample != 1:
+ raise ValueError(
+ "Only powers of 4 expected for spatial and 1 expected for temporal downsampling with conv."
+ )
+ self.convnet = Conv2DDownsample(
+ in_channels=in_channels,
+ num_layers=int(convnet_num_layers),
+ out_channels=out_channels,
+ use_batchnorm=conv2d_use_batchnorm,
+ )
+
+ elif self.prep_type == "conv1x1":
+ if temporal_downsample != 1:
+ raise ValueError("Conv1x1 does not downsample in time.")
+ self.convnet_1x1 = nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=(1, 1),
+ # spatial_downsample is unconstrained for 1x1 convolutions.
+ stride=(spatial_downsample, spatial_downsample),
+ )
+
+ # Position embeddings
+ self.project_pos_dim = project_pos_dim
+ self.position_embeddings, self.positions_projection = build_position_encoding(
+ position_encoding_type=position_encoding_type,
+ out_channels=out_channels,
+ project_pos_dim=project_pos_dim,
+ **position_encoding_kwargs,
+ )
+
+ # Optional convolutional layer after patches.
+ self.conv_after_patches = (
+ nn.Linear(conv_after_patching_in_channels, self.out_channels) if conv_after_patching else nn.Identity()
+ )
+
+ @property
+ def num_channels(self) -> int:
+        # The number of index dimensions of the input data is 2 for images and 3 for videos.
+        # is_temporal indicates whether the data has a temporal dimension.
+ is_temporal = self.position_embeddings.num_dimensions > 2
+
+ # position embedding
+ if self.project_pos_dim > 0:
+ pos_dim = self.project_pos_dim
+ else:
+ pos_dim = self.position_embeddings.output_size()
+ if self.concat_or_add_pos == "add":
+ return pos_dim
+
+ # inputs
+ if self.conv_after_patching or self.prep_type in ("conv1x1", "conv"):
+ inp_dim = self.out_channels
+ elif self.prep_type == "pixels":
+ inp_dim = self.in_channels
+ if not is_temporal:
+ inp_dim = math.ceil(inp_dim / self.spatial_downsample)
+ elif self.prep_type == "patches":
+ if self.conv_after_patching:
+ inp_dim = self.out_channels
+ else:
+ inp_dim = self.in_channels * self.spatial_downsample**2
+ if is_temporal:
+ inp_dim *= self.temporal_downsample
+
+ return inp_dim + pos_dim
+
+ def _build_network_inputs(self, inputs: torch.Tensor, network_input_is_1d: bool = True):
+ """
+ Construct the final input, including position encoding.
+
+ This method expects the inputs to always have channels as last dimension.
+
+ """
+ batch_size = inputs.shape[0]
+ index_dims = inputs.shape[1:-1]
+ indices = np.prod(index_dims)
+
+ # Flatten input features to a 1D index dimension if necessary.
+ if len(inputs.shape) > 3 and network_input_is_1d:
+ inputs = torch.reshape(inputs, [batch_size, indices, -1])
+
+ # Construct the position encoding.
+ if self.position_encoding_type == "trainable":
+ pos_enc = self.position_embeddings(batch_size)
+ elif self.position_encoding_type == "fourier":
+ pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)
+
+ # Optionally project them to a target dimension.
+ pos_enc = self.positions_projection(pos_enc)
+
+ if not network_input_is_1d:
+ # Reshape pos to match the input feature shape
+ # if the network takes non-1D inputs
+ sh = inputs.shape
+ pos_enc = torch.reshape(pos_enc, list(sh)[:-1] + [-1])
+ if self.concat_or_add_pos == "concat":
+ inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
+ elif self.concat_or_add_pos == "add":
+ inputs_with_pos = inputs + pos_enc
+ return inputs_with_pos, inputs
+
+ def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
+ if self.prep_type == "conv":
+ # Convnet image featurization.
+ # Downsamples spatially by a factor of 4
+ inputs = self.convnet(inputs)
+
+ elif self.prep_type == "conv1x1":
+ # map inputs to self.out_channels
+ inputs = self.convnet_1x1(inputs)
+
+ elif self.prep_type == "pixels":
+ # if requested, downsamples in the crudest way
+ if inputs.ndim == 4:
+                # inputs have a channels-first layout (batch, channels, height, width) at this point,
+                # so subsample the two spatial dimensions
+                inputs = inputs[:, :, :: self.spatial_downsample, :: self.spatial_downsample]
+ elif inputs.ndim == 5:
+ inputs = inputs[
+ :, :: self.temporal_downsample, :, :: self.spatial_downsample, :: self.spatial_downsample
+ ]
+ else:
+ raise ValueError("Unsupported data format for pixels.")
+
+ elif self.prep_type == "patches":
+ # Space2depth featurization.
+ # Video: B x T x C x H x W
+ inputs = space_to_depth(
+ inputs, temporal_block_size=self.temporal_downsample, spatial_block_size=self.spatial_downsample
+ )
+
+ if inputs.ndim == 5 and inputs.shape[1] == 1:
+ # for flow
+ inputs = inputs.squeeze(dim=1)
+
+ # Optionally apply conv layer.
+ inputs = self.conv_after_patches(inputs)
+
+ if self.prep_type != "patches":
+ # move channels to last dimension, as the _build_network_inputs method below expects this
+ if inputs.ndim == 4:
+ inputs = inputs.permute(0, 2, 3, 1)
+ elif inputs.ndim == 5:
+ inputs = inputs.permute(0, 1, 3, 4, 2)
+ else:
+                raise ValueError(f"Unsupported data format for prep_type {self.prep_type}.")
+
+ inputs, inputs_without_pos = self._build_network_inputs(inputs, network_input_is_1d)
+ modality_sizes = None # Size for each modality, only needed for multimodal
+
+ return inputs, modality_sizes, inputs_without_pos
+
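+
+# Illustrative sketch (not part of the library): with `prep_type="conv1x1"` the preprocessor maps
+# (batch, 3, H, W) images to a flattened (batch, H' * W', out_channels + pos_channels) sequence.
+# The config and kwargs below are arbitrary and only meant to show the plumbing.
+def _example_image_preprocessor():
+    preprocessor = PerceiverImagePreprocessor(
+        PerceiverConfig(),
+        prep_type="conv1x1",
+        spatial_downsample=1,
+        out_channels=16,
+        fourier_position_encoding_kwargs=dict(num_bands=4, max_resolution=(8, 8)),
+    )
+    inputs, _, _ = preprocessor(torch.randn(2, 3, 8, 8))
+    return inputs.shape  # torch.Size([2, 64, 34]): 16 conv channels + 18 position channels
+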
+
+class PerceiverOneHotPreprocessor(AbstractPreprocessor):
+ """
+ One-hot preprocessor for Perceiver Encoder. Can be used to add a dummy index dimension to the input.
+
+ Args:
+ config ([`PerceiverConfig`]):
+ Model configuration.
+ """
+
+ def __init__(self, config: PerceiverConfig) -> None:
+ super().__init__()
+ self.config: PerceiverConfig = config
+
+ @property
+ def num_channels(self) -> int:
+ return self.config.num_labels
+
+ def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
+ # Add a dummy index dimension.
+ inputs = inputs[:, None, :]
+
+ # No position encodings, so the 1st (input) and 3rd (inputs_without_pos)
+ # outputs are identical.
+ return inputs, None, inputs
+
+
+class PerceiverAudioPreprocessor(AbstractPreprocessor):
+ """
+ Audio preprocessing for Perceiver Encoder.
+
+ Args:
+        config ([`PerceiverConfig`]):
+ Model configuration.
+ prep_type (`str`, *optional*, defaults to `"patches"`):
+ Preprocessor type to use. Only "patches" is supported.
+ samples_per_patch (`int`, *optional*, defaults to 96):
+ Number of samples per patch.
+ position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
+ Type of position encoding to use. Can be "trainable" or "fourier".
+ concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
+ How to concatenate the position encoding to the input. Can be "concat" or "add".
+ out_channels (`int`, *optional*, defaults to 64):
+ Number of channels in the output.
+ project_pos_dim (`int`, *optional*, defaults to -1):
+ Dimension of the position encoding to project to. If -1, no projection is applied.
+ **position_encoding_kwargs (`Dict`, *optional*):
+ Keyword arguments for the position encoding.
+ """
+
+ def __init__(
+ self,
+ config,
+ prep_type: str = "patches",
+ samples_per_patch: int = 96,
+ position_encoding_type: str = "fourier",
+ concat_or_add_pos: str = "concat",
+ out_channels=64,
+ project_pos_dim=-1,
+ **position_encoding_kwargs,
+ ):
+ super().__init__()
+ self.config = config
+
+ if prep_type not in ("patches",):
+ raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.")
+
+ if concat_or_add_pos not in ["concat", "add"]:
+ raise ValueError(f"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'.")
+
+ self.samples_per_patch = samples_per_patch
+ self.position_encoding_type = position_encoding_type
+ self.concat_or_add_pos = concat_or_add_pos
+ self.project_pos_dim = project_pos_dim
+
+ # Position embeddings
+ self.position_embeddings, self.positions_projection = build_position_encoding(
+ position_encoding_type=position_encoding_type,
+ out_channels=out_channels,
+ project_pos_dim=project_pos_dim,
+ **position_encoding_kwargs,
+ )
+
+ @property
+ def num_channels(self) -> int:
+ # position embedding
+ if self.project_pos_dim > 0:
+ pos_dim = self.project_pos_dim
+ else:
+ pos_dim = self.position_embeddings.output_size()
+ if self.concat_or_add_pos == "add":
+ return pos_dim
+ return self.samples_per_patch + pos_dim
+
+ def _build_network_inputs(self, inputs):
+ """Construct the final input, including position encoding."""
+ batch_size = inputs.shape[0]
+ index_dims = inputs.shape[1:-1]
+
+ # Construct the position encoding.
+ if self.position_encoding_type == "trainable":
+ pos_enc = self.position_embeddings(batch_size)
+ elif self.position_encoding_type == "fourier":
+ pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)
+
+ # Optionally project them to a target dimension.
+ pos_enc = self.positions_projection(pos_enc)
+
+ if self.concat_or_add_pos == "concat":
+ inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
+ elif self.concat_or_add_pos == "add":
+ inputs_with_pos = inputs + pos_enc
+
+ return inputs_with_pos, inputs
+
+ def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
+ inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch])
+
+ inputs, inputs_without_pos = self._build_network_inputs(inputs)
+ modality_sizes = None # Size for each modality, only needed for multimodal
+
+ return inputs, modality_sizes, inputs_without_pos
+
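+
+# Illustrative sketch (not part of the library): raw audio of shape (batch, num_samples, 1) is folded
+# into patches of `samples_per_patch` samples before Fourier position features are concatenated.
+# The kwargs below are arbitrary and only chosen to keep shapes small.
+def _example_audio_preprocessor():
+    preprocessor = PerceiverAudioPreprocessor(
+        PerceiverConfig(),
+        samples_per_patch=16,
+        fourier_position_encoding_kwargs=dict(num_bands=4, max_resolution=(12,)),
+    )
+    inputs, _, _ = preprocessor(torch.randn(2, 192, 1))
+    return inputs.shape  # torch.Size([2, 12, 25]): 16 samples + 9 position channels per patch
+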
+
+class PerceiverMultimodalPreprocessor(AbstractPreprocessor):
+ """
+ Multimodal preprocessing for Perceiver Encoder.
+
+ Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number
+ of channels.
+
+ Args:
+ modalities (`Mapping[str, PreprocessorType]`):
+ Dict mapping modality name to preprocessor.
+ mask_probs (`Dict[str, float]`):
+ Dict mapping modality name to masking probability of that modality.
+ min_padding_size (`int`, *optional*, defaults to 2):
+ The minimum padding size for all modalities. The final output will have num_channels equal to the maximum
+ channels across all modalities plus min_padding_size.
+ """
+
+ def __init__(
+ self,
+ modalities: Mapping[str, PreprocessorType],
+ mask_probs: Optional[Mapping[str, float]] = None,
+ min_padding_size: int = 2,
+ ):
+ super().__init__()
+ self.modalities = nn.ModuleDict(modalities)
+ self.min_padding_size = min_padding_size
+ self.mask_probs = mask_probs if mask_probs is not None else {}
+ self.padding = nn.ParameterDict(
+ {
+ modality: nn.Parameter(torch.randn(1, self.num_channels - preprocessor.num_channels))
+ for modality, preprocessor in modalities.items()
+ }
+ )
+ self.mask = nn.ParameterDict(
+ {modality: nn.Parameter(torch.randn(1, self.num_channels)) for modality, _ in self.mask_probs.items()}
+ )
+
+ @property
+ def num_channels(self) -> int:
+ max_channel_size = max(processor.num_channels for _, processor in self.modalities.items())
+ common_channel_size = max_channel_size + self.min_padding_size
+ return common_channel_size
+
+ def forward(
+ self, inputs: Mapping[str, torch.Tensor], pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True
+ ) -> PreprocessorOutputType:
+ padded = {}
+ modality_sizes = {}
+ inputs_without_pos = {}
+ for modality, preprocessor in self.modalities.items():
+ # preprocess each modality using the respective preprocessor.
+ output, _, inputs_without_pos[modality] = preprocessor(
+ inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d
+ )
+
+ # pad to the same common_channel_size.
+ batch_size, num_samples, num_channels = output.shape
+ pos_enc = self.padding[modality].expand(batch_size, -1, -1)
+
+ padding = torch.broadcast_to(
+ pos_enc,
+ [batch_size, num_samples, self.num_channels - num_channels],
+ )
+ output_padded = torch.cat([output, padding], dim=2)
+
+ # mask if required
+ if modality in self.mask_probs:
+ mask_token = self.mask[modality].expand(batch_size, -1, -1)
+ mask_prob = self.mask_probs[modality]
+ mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob))
+ mask = torch.unsqueeze(mask, dim=2).to(mask_token.device)
+ output_padded = (1 - mask) * output_padded + mask * mask_token
+
+ padded[modality] = output_padded
+ modality_sizes[modality] = output_padded.shape[1]
+
+ # Apply a predictable ordering to the modalities
+ padded_ls = [padded[k] for k in sorted(padded.keys())]
+
+ # Finally, concatenate along the time dimension
+ final_inputs = torch.cat(padded_ls, dim=1)
+
+ return final_inputs, modality_sizes, inputs_without_pos
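+
+
+# Illustrative sketch (not part of the library): each modality is preprocessed independently and then
+# padded up to a common channel size (the maximum across modalities plus `min_padding_size`).
+# The config values and modality choices below are arbitrary.
+def _example_multimodal_preprocessor():
+    config = PerceiverConfig(vocab_size=262, d_model=32, max_position_embeddings=128, num_labels=8)
+    preprocessor = PerceiverMultimodalPreprocessor(
+        modalities={
+            "text": PerceiverTextPreprocessor(config),  # 32 channels
+            "label": PerceiverOneHotPreprocessor(config),  # 8 channels
+        },
+        min_padding_size=2,
+    )
+    inputs = {"text": torch.randint(0, 262, (2, 16)), "label": torch.randn(2, 8)}
+    final_inputs, modality_sizes, _ = preprocessor(inputs)
+    # both modalities are padded to 32 + 2 = 34 channels and concatenated along the sequence dimension
+    return final_inputs.shape, modality_sizes  # torch.Size([2, 17, 34]), {'text': 16, 'label': 1}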
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/tokenization_perceiver.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/tokenization_perceiver.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4ec1e378e567143c6636da6f192c31a7be9e7b9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/perceiver/tokenization_perceiver.py
@@ -0,0 +1,198 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization class for Perceiver."""
+
+
+from typing import Dict, List, Optional, Tuple
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class PerceiverTokenizer(PreTrainedTokenizer):
+ """
+    Construct a Perceiver tokenizer. The Perceiver simply uses raw byte-level (UTF-8) encoding.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ bos_token (`str`, *optional*, defaults to `"[BOS]"`):
+ The BOS token (reserved in the vocab, but not actually used).
+ eos_token (`str`, *optional*, defaults to `"[EOS]"`):
+ The end of sequence token (reserved in the vocab, but not actually used).
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+            The token used is the `sep_token`.
+
+            </Tip>
+
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The MASK token, useful for masked language modeling.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The CLS token (reserved in the vocab, but not actually used).
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from two sequences.
+
+ """
+
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ pad_token="[PAD]",
+ bos_token="[BOS]",
+ eos_token="[EOS]",
+ mask_token="[MASK]",
+ cls_token="[CLS]",
+ sep_token="[SEP]",
+ model_max_length=2048,
+ **kwargs,
+ ) -> None:
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ mask_token = AddedToken(mask_token, lstrip=False, rstrip=False) if isinstance(mask_token, str) else mask_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+
+        self._utf_vocab_size = 2**8  # UTF-8 text is a sequence of bytes, i.e. 256 possible values
+
+ # Since these tokens are not part of the vocabulary, we manually add them
+        self._added_tokens_decoder: Dict[int, AddedToken] = {
+ 0: pad_token,
+ 1: bos_token,
+ 2: eos_token,
+ 3: mask_token,
+ 4: cls_token,
+ 5: sep_token,
+ }
+ self._num_special_tokens = len(self._added_tokens_decoder)
+ super().__init__(
+ pad_token=pad_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ mask_token=mask_token,
+ cls_token=cls_token,
+ sep_token=sep_token,
+ model_max_length=model_max_length,
+ **kwargs,
+ )
+
+ def get_vocab(self) -> Dict[str, int]:
+ vocab = {}
+ for i in range(self._utf_vocab_size):
+ token = chr(i)
+ vocab[token] = i + self._num_special_tokens
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ @property
+ def vocab_size(self):
+ return self._utf_vocab_size
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ # normal case: some special tokens
+ if token_ids_1 is None:
+ return [1] + [0] * len(token_ids_0) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks. A sequence has the
+ following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ else:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1 + [self.sep_token_id]
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
+ tokens = [chr(i) for i in text.encode("utf-8")]
+ return tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if len(token) != 1:
+ token_id = self.unk_token_id
+ else:
+ token_id = ord(token) + self._num_special_tokens
+ return token_id
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = chr(index - self._num_special_tokens)
+ return token
+
+ # TODO @ArthurZ refactor this as well....
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ bstring = b""
+ for token in tokens:
+ if token in self.added_tokens_encoder:
+ tok_string = str(token).encode("utf-8")
+ else:
+ tok_string = bytes([ord(token)])
+ bstring += tok_string
+ string = bstring.decode("utf-8", errors="replace")
+ return string
+
+ # PerceiverTokenizer has no vocab file
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ return ()
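+
+ # --- Illustrative sketch, not part of the original module ------------------------------
+ # The tokenizer above is byte-level: the text is UTF-8 encoded, every byte becomes a
+ # one-character token via `chr`, and ids are shifted by the 6 reserved special tokens
+ # ([PAD]=0, [BOS]=1, [EOS]=2, [MASK]=3, [CLS]=4, [SEP]=5). The snippet below redoes that
+ # arithmetic with plain Python, mirroring `_tokenize`, `_convert_token_to_id` and
+ # `build_inputs_with_special_tokens` defined above.
+ if __name__ == "__main__":
+     num_special_tokens = 6
+     text = "Ab"
+     tokens = [chr(b) for b in text.encode("utf-8")]  # ['A', 'b']
+     token_ids = [ord(token) + num_special_tokens for token in tokens]  # [71, 104]
+     # Wrapped as a single sequence: [CLS] X [SEP] -> [4, 71, 104, 5]
+     input_ids = [4] + token_ids + [5]
+     print(tokens, token_ids, input_ids)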
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a427b42084e51a134baba301565fb66a2e7587fc
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d495bc4f71337e4b5b3ef005ad244aaef6b39d4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..313767c02dda89ccb6c3691c56843bb3559be7ca
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__init__.py
@@ -0,0 +1,77 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+ "configuration_superpoint": [
+ "SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "SuperPointConfig",
+ ]
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_superpoint"] = ["SuperPointImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_superpoint"] = [
+ "SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "SuperPointForKeypointDetection",
+ "SuperPointPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_superpoint import (
+ SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ SuperPointConfig,
+ )
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_superpoint import SuperPointImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_superpoint import (
+ SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ SuperPointForKeypointDetection,
+ SuperPointPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/configuration_superpoint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/configuration_superpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64562f295d9b514d4dce5e499a5fedacaf50c243
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/configuration_superpoint.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/convert_superpoint_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/convert_superpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53368a8c4787349864696ca5efe19a9d0de1b55e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/convert_superpoint_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/modeling_superpoint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/modeling_superpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..137bd3fcc7c9582faf9fc3f7ab082301b220b574
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/modeling_superpoint.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/configuration_superpoint.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/configuration_superpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..5970a6e1b4134d08d1fa17f69bbf50316d341665
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/configuration_superpoint.py
@@ -0,0 +1,91 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import List
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SUPERPOINT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "magic-leap-community/superpoint": "https://huggingface.co/magic-leap-community/superpoint/blob/main/config.json"
+}
+
+
+class SuperPointConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`SuperPointForKeypointDetection`]. It is used to instantiate a
+ SuperPoint model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the SuperPoint
+ [magic-leap-community/superpoint](https://huggingface.co/magic-leap-community/superpoint) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ encoder_hidden_sizes (`List`, *optional*, defaults to `[64, 64, 128, 128]`):
+ The number of channels in each convolutional layer in the encoder.
+ decoder_hidden_size (`int`, *optional*, defaults to 256): The hidden size of the decoder.
+ keypoint_decoder_dim (`int`, *optional*, defaults to 65): The output dimension of the keypoint decoder.
+ descriptor_decoder_dim (`int`, *optional*, defaults to 256): The output dimension of the descriptor decoder.
+ keypoint_threshold (`float`, *optional*, defaults to 0.005):
+ The threshold to use for extracting keypoints.
+ max_keypoints (`int`, *optional*, defaults to -1):
+ The maximum number of keypoints to extract. If `-1`, will extract all keypoints.
+ nms_radius (`int`, *optional*, defaults to 4):
+ The radius for non-maximum suppression.
+ border_removal_distance (`int`, *optional*, defaults to 4):
+ The distance from the border to remove keypoints.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+
+ Example:
+ ```python
+ >>> from transformers import SuperPointConfig, SuperPointForKeypointDetection
+
+ >>> # Initializing a SuperPoint superpoint style configuration
+ >>> configuration = SuperPointConfig()
+ >>> # Initializing a model from the superpoint style configuration
+ >>> model = SuperPointForKeypointDetection(configuration)
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "superpoint"
+
+ def __init__(
+ self,
+ encoder_hidden_sizes: List[int] = [64, 64, 128, 128],
+ decoder_hidden_size: int = 256,
+ keypoint_decoder_dim: int = 65,
+ descriptor_decoder_dim: int = 256,
+ keypoint_threshold: float = 0.005,
+ max_keypoints: int = -1,
+ nms_radius: int = 4,
+ border_removal_distance: int = 4,
+ initializer_range=0.02,
+ **kwargs,
+ ):
+ self.encoder_hidden_sizes = encoder_hidden_sizes
+ self.decoder_hidden_size = decoder_hidden_size
+ self.keypoint_decoder_dim = keypoint_decoder_dim
+ self.descriptor_decoder_dim = descriptor_decoder_dim
+ self.keypoint_threshold = keypoint_threshold
+ self.max_keypoints = max_keypoints
+ self.nms_radius = nms_radius
+ self.border_removal_distance = border_removal_distance
+ self.initializer_range = initializer_range
+
+ super().__init__(**kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/convert_superpoint_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/convert_superpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..18755bf4fe01b2b6de2a0a2e0970df7f06909c5a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/convert_superpoint_to_pytorch.py
@@ -0,0 +1,175 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import os
+
+import requests
+import torch
+from PIL import Image
+
+from transformers import SuperPointConfig, SuperPointForKeypointDetection, SuperPointImageProcessor
+
+
+def get_superpoint_config():
+ config = SuperPointConfig(
+ encoder_hidden_sizes=[64, 64, 128, 128],
+ decoder_hidden_size=256,
+ keypoint_decoder_dim=65,
+ descriptor_decoder_dim=256,
+ keypoint_threshold=0.005,
+ max_keypoints=-1,
+ nms_radius=4,
+ border_removal_distance=4,
+ initializer_range=0.02,
+ )
+
+ return config
+
+
+def create_rename_keys(config, state_dict):
+ rename_keys = []
+
+ # Encoder weights
+ rename_keys.append(("conv1a.weight", "encoder.conv_blocks.0.conv_a.weight"))
+ rename_keys.append(("conv1b.weight", "encoder.conv_blocks.0.conv_b.weight"))
+ rename_keys.append(("conv2a.weight", "encoder.conv_blocks.1.conv_a.weight"))
+ rename_keys.append(("conv2b.weight", "encoder.conv_blocks.1.conv_b.weight"))
+ rename_keys.append(("conv3a.weight", "encoder.conv_blocks.2.conv_a.weight"))
+ rename_keys.append(("conv3b.weight", "encoder.conv_blocks.2.conv_b.weight"))
+ rename_keys.append(("conv4a.weight", "encoder.conv_blocks.3.conv_a.weight"))
+ rename_keys.append(("conv4b.weight", "encoder.conv_blocks.3.conv_b.weight"))
+ rename_keys.append(("conv1a.bias", "encoder.conv_blocks.0.conv_a.bias"))
+ rename_keys.append(("conv1b.bias", "encoder.conv_blocks.0.conv_b.bias"))
+ rename_keys.append(("conv2a.bias", "encoder.conv_blocks.1.conv_a.bias"))
+ rename_keys.append(("conv2b.bias", "encoder.conv_blocks.1.conv_b.bias"))
+ rename_keys.append(("conv3a.bias", "encoder.conv_blocks.2.conv_a.bias"))
+ rename_keys.append(("conv3b.bias", "encoder.conv_blocks.2.conv_b.bias"))
+ rename_keys.append(("conv4a.bias", "encoder.conv_blocks.3.conv_a.bias"))
+ rename_keys.append(("conv4b.bias", "encoder.conv_blocks.3.conv_b.bias"))
+
+ # Keypoint Decoder weights
+ rename_keys.append(("convPa.weight", "keypoint_decoder.conv_score_a.weight"))
+ rename_keys.append(("convPb.weight", "keypoint_decoder.conv_score_b.weight"))
+ rename_keys.append(("convPa.bias", "keypoint_decoder.conv_score_a.bias"))
+ rename_keys.append(("convPb.bias", "keypoint_decoder.conv_score_b.bias"))
+
+ # Descriptor Decoder weights
+ rename_keys.append(("convDa.weight", "descriptor_decoder.conv_descriptor_a.weight"))
+ rename_keys.append(("convDb.weight", "descriptor_decoder.conv_descriptor_b.weight"))
+ rename_keys.append(("convDa.bias", "descriptor_decoder.conv_descriptor_a.bias"))
+ rename_keys.append(("convDb.bias", "descriptor_decoder.conv_descriptor_b.bias"))
+
+ return rename_keys
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
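+
+
+ # Illustrative note, not part of the original script: `create_rename_keys` pairs each
+ # original SuperPoint parameter name with its HuggingFace counterpart, and `rename_key`
+ # moves the entry in place. For a minimal dict this looks like:
+ #
+ #     state_dict = {"conv1a.weight": torch.zeros(64, 1, 3, 3)}
+ #     rename_key(state_dict, "conv1a.weight", "encoder.conv_blocks.0.conv_a.weight")
+ #     # state_dict now only contains the key "encoder.conv_blocks.0.conv_a.weight"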
+
+
+def prepare_imgs():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im1 = Image.open(requests.get(url, stream=True).raw)
+ url = "http://images.cocodataset.org/test-stuff2017/000000004016.jpg"
+ im2 = Image.open(requests.get(url, stream=True).raw)
+ return [im1, im2]
+
+
+@torch.no_grad()
+def convert_superpoint_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub, test_mode=False):
+ """
+ Copy/paste/tweak model's weights to our SuperPoint structure.
+ """
+
+ print("Downloading original model from checkpoint...")
+ config = get_superpoint_config()
+
+ # load original state_dict from URL
+ original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)
+
+ print("Converting model parameters...")
+ # rename keys
+ rename_keys = create_rename_keys(config, original_state_dict)
+ new_state_dict = original_state_dict.copy()
+ for src, dest in rename_keys:
+ rename_key(new_state_dict, src, dest)
+
+ # Load HuggingFace model
+ model = SuperPointForKeypointDetection(config)
+ model.load_state_dict(new_state_dict)
+ model.eval()
+ print("Successfully loaded weights in the model")
+
+ # Check model outputs
+ preprocessor = SuperPointImageProcessor()
+ inputs = preprocessor(images=prepare_imgs(), return_tensors="pt")
+ outputs = model(**inputs)
+
+ # If test_mode is True, we check that the model outputs match the original results
+ if test_mode:
+ torch.count_nonzero(outputs.mask[0])
+ expected_keypoints_shape = (2, 830, 2)
+ expected_scores_shape = (2, 830)
+ expected_descriptors_shape = (2, 830, 256)
+
+ expected_keypoints_values = torch.tensor([[480.0, 9.0], [494.0, 9.0], [489.0, 16.0]])
+ expected_scores_values = torch.tensor([0.0064, 0.0140, 0.0595, 0.0728, 0.5170, 0.0175, 0.1523, 0.2055, 0.0336])
+ expected_descriptors_value = torch.tensor(-0.1096)
+ assert outputs.keypoints.shape == expected_keypoints_shape
+ assert outputs.scores.shape == expected_scores_shape
+ assert outputs.descriptors.shape == expected_descriptors_shape
+
+ assert torch.allclose(outputs.keypoints[0, :3], expected_keypoints_values, atol=1e-3)
+ assert torch.allclose(outputs.scores[0, :9], expected_scores_values, atol=1e-3)
+ assert torch.allclose(outputs.descriptors[0, 0, 0], expected_descriptors_value, atol=1e-3)
+ print("Model outputs match the original results!")
+
+ if save_model:
+ print("Saving model to local...")
+ # Create folder to save model
+ if not os.path.isdir(pytorch_dump_folder_path):
+ os.mkdir(pytorch_dump_folder_path)
+
+ model.save_pretrained(pytorch_dump_folder_path)
+ preprocessor.save_pretrained(pytorch_dump_folder_path)
+
+ model_name = "superpoint"
+ if push_to_hub:
+ print(f"Pushing {model_name} to the hub...")
+ model.push_to_hub(model_name)
+ preprocessor.push_to_hub(model_name)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://github.com/magicleap/SuperPointPretrainedNetwork/raw/master/superpoint_v1.pth",
+ type=str,
+ help="URL of the original SuperPoint checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default="model",
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument("--save_model", action="store_true", help="Save model to local")
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub")
+
+ args = parser.parse_args()
+ convert_superpoint_checkpoint(
+ args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub
+ )
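+
+ # Example invocation (illustrative; assumes the default Magic Leap checkpoint URL above
+ # is reachable):
+ #
+ #     python convert_superpoint_to_pytorch.py \
+ #         --checkpoint_url https://github.com/magicleap/SuperPointPretrainedNetwork/raw/master/superpoint_v1.pth \
+ #         --pytorch_dump_folder_path model \
+ #         --save_model
+ #
+ # Add `--push_to_hub` to also upload the converted model and image processor to the Hub.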
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/image_processing_superpoint.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/image_processing_superpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c7e2a7debacd510ec6de347a3f94e0f67afc9bf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/image_processing_superpoint.py
@@ -0,0 +1,272 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for SuperPoint."""
+
+from typing import Dict, Optional, Union
+
+import numpy as np
+
+from ... import is_vision_available, requires_backends
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import resize, to_channel_dimension_format
+from ...image_utils import (
+ ChannelDimension,
+ ImageInput,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+)
+from ...utils import TensorType, logging
+
+
+if is_vision_available():
+ import PIL
+
+logger = logging.get_logger(__name__)
+
+
+def is_grayscale(
+ image: ImageInput,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+):
+ if input_data_format == ChannelDimension.FIRST:
+ return np.all(image[0, ...] == image[1, ...]) and np.all(image[1, ...] == image[2, ...])
+ elif input_data_format == ChannelDimension.LAST:
+ return np.all(image[..., 0] == image[..., 1]) and np.all(image[..., 1] == image[..., 2])
+
+
+def convert_to_grayscale(
+ image: ImageInput,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+) -> ImageInput:
+ """
+ Converts an image to grayscale format using the NTSC formula. Only supports numpy arrays and PIL images. TODO:
+ support torch and tensorflow grayscale conversion.
+
+ This function is supposed to return a 1-channel image, but it returns a 3-channel image with the same value in each
+ channel, because of an issue that is discussed in
+ https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
+
+ Args:
+ image (Image):
+ The image to convert.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image.
+ """
+ requires_backends(convert_to_grayscale, ["vision"])
+
+ if isinstance(image, np.ndarray):
+ if input_data_format == ChannelDimension.FIRST:
+ gray_image = image[0, ...] * 0.2989 + image[1, ...] * 0.5870 + image[2, ...] * 0.1140
+ gray_image = np.stack([gray_image] * 3, axis=0)
+ elif input_data_format == ChannelDimension.LAST:
+ gray_image = image[..., 0] * 0.2989 + image[..., 1] * 0.5870 + image[..., 2] * 0.1140
+ gray_image = np.stack([gray_image] * 3, axis=-1)
+ return gray_image
+
+ if not isinstance(image, PIL.Image.Image):
+ return image
+
+ image = image.convert("L")
+ return image
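+
+
+ # Illustrative sketch, not part of the original file: the NTSC weighting used in
+ # `convert_to_grayscale` collapses the three RGB planes into a single luminance plane
+ # and then repeats it on every channel, so downstream code can keep assuming 3 channels.
+ def _ntsc_grayscale_sketch() -> np.ndarray:
+     # A single red pixel in channels-first layout, shape (3, 1, 1).
+     image = np.array([255.0, 0.0, 0.0]).reshape(3, 1, 1)
+     gray = image[0, ...] * 0.2989 + image[1, ...] * 0.5870 + image[2, ...] * 0.1140
+     # The same value (~76.2) replicated on the 3 channels, as in the function above.
+     return np.stack([gray] * 3, axis=0)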
+
+
+class SuperPointImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a SuperPoint image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
+ by `do_resize` in the `preprocess` method.
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 480, "width": 640}`):
+ Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
+ `True`. Can be overridden by `size` in the `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+ the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+ method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ do_rescale: bool = True,
+ rescale_factor: float = 1 / 255,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 480, "width": 640}
+ size = get_size_dict(size, default_to_square=False)
+
+ self.do_resize = do_resize
+ self.size = size
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ):
+ """
+ Resize an image.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the output image. If not provided, it will be inferred from the input
+ image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ size = get_size_dict(size, default_to_square=False)
+
+ return resize(
+ image,
+ size=(size["height"], size["width"]),
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def preprocess(
+ self,
+ images,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image after
+ `resize` has been applied. Only has an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values to the `[0, 1]` range.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ if do_resize and size is None:
+ raise ValueError("Size must be specified if do_resize is True.")
+
+ if do_rescale and rescale_factor is None:
+ raise ValueError("Rescale factor must be specified if do_rescale is True.")
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [self.resize(image=image, size=size, input_data_format=input_data_format) for image in images]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ # Checking if image is RGB or grayscale
+ for i in range(len(images)):
+ if not is_grayscale(images[i], input_data_format):
+ images[i] = convert_to_grayscale(images[i], input_data_format=input_data_format)
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+
+ return BatchFeature(data=data, tensor_type=return_tensors)
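+
+
+ # Minimal usage sketch, not part of the original file. With the defaults above,
+ # `preprocess` resizes to 480x640, rescales to [0, 1] and replicates the grayscale
+ # values over 3 channels, so a single RGB input should come back with shape
+ # (1, 3, 480, 640) in channels-first layout (assumes Pillow is installed).
+ if __name__ == "__main__":
+     dummy_image = np.random.randint(0, 256, size=(600, 800, 3), dtype=np.uint8)
+     processor = SuperPointImageProcessor()
+     batch = processor.preprocess(dummy_image, return_tensors="np")
+     print(batch["pixel_values"].shape)  # expected: (1, 3, 480, 640)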
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/modeling_superpoint.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/modeling_superpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4350e6d79af6e54388e767637456e8ea485bbe8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/modeling_superpoint.py
@@ -0,0 +1,507 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch SuperPoint model."""
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from transformers import PreTrainedModel
+from transformers.modeling_outputs import (
+ BaseModelOutputWithNoAttention,
+)
+from transformers.models.superpoint.configuration_superpoint import SuperPointConfig
+
+from ...pytorch_utils import is_torch_greater_or_equal_than_1_13
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "SuperPointConfig"
+
+_CHECKPOINT_FOR_DOC = "magic-leap-community/superpoint"
+
+SUPERPOINT_PRETRAINED_MODEL_ARCHIVE_LIST = ["magic-leap-community/superpoint"]
+
+
+def remove_keypoints_from_borders(
+ keypoints: torch.Tensor, scores: torch.Tensor, border: int, height: int, width: int
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Removes keypoints (and their associated scores) that are too close to the border"""
+ mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border))
+ mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border))
+ mask = mask_h & mask_w
+ return keypoints[mask], scores[mask]
+
+
+def top_k_keypoints(keypoints: torch.Tensor, scores: torch.Tensor, k: int) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Keeps the k keypoints with highest score"""
+ if k >= len(keypoints):
+ return keypoints, scores
+ scores, indices = torch.topk(scores, k, dim=0)
+ return keypoints[indices], scores
+
+
+def simple_nms(scores: torch.Tensor, nms_radius: int) -> torch.Tensor:
+ """Applies non-maximum suppression on scores"""
+ if nms_radius < 0:
+ raise ValueError("Expected positive values for nms_radius")
+
+ def max_pool(x):
+ return nn.functional.max_pool2d(x, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius)
+
+ zeros = torch.zeros_like(scores)
+ max_mask = scores == max_pool(scores)
+ for _ in range(2):
+ supp_mask = max_pool(max_mask.float()) > 0
+ supp_scores = torch.where(supp_mask, zeros, scores)
+ new_max_mask = supp_scores == max_pool(supp_scores)
+ max_mask = max_mask | (new_max_mask & (~supp_mask))
+ return torch.where(max_mask, scores, zeros)
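+
+
+ # Illustrative sketch, not part of the original file: `simple_nms` keeps only local
+ # maxima of the score map inside a (2 * nms_radius + 1) window and zeroes everything
+ # else, which suppresses duplicate detections of the same keypoint.
+ def _simple_nms_sketch() -> torch.Tensor:
+     scores = torch.zeros(1, 1, 8, 8)
+     scores[0, 0, 2, 2] = 0.9  # strong detection
+     scores[0, 0, 2, 3] = 0.5  # weaker neighbour, suppressed
+     scores[0, 0, 6, 6] = 0.7  # far enough away to survive
+     return simple_nms(scores, nms_radius=2)  # non-zero only at (2, 2) and (6, 6)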
+
+
+@dataclass
+class ImagePointDescriptionOutput(ModelOutput):
+ """
+ Base class for outputs of image point description models. Due to the nature of keypoint detection, the number of
+ keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the batch of images,
+ the maximum number of keypoints is set as the dimension of the keypoints, scores and descriptors tensors. The mask
+ tensor is used to indicate which values in the keypoints, scores and descriptors tensors are keypoint information
+ and which are padding.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+ keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
+ Relative (x, y) coordinates of predicted keypoints in a given image.
+ scores (`torch.FloatTensor` of shape `(batch_size, num_keypoints)`):
+ Scores of predicted keypoints.
+ descriptors (`torch.FloatTensor` of shape `(batch_size, num_keypoints, descriptor_size)`):
+ Descriptors of predicted keypoints.
+ mask (`torch.BoolTensor` of shape `(batch_size, num_keypoints)`):
+ Mask indicating which values in keypoints, scores and descriptors are keypoint information.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
+ when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
+ (also called feature maps) of the model at the output of each stage.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ keypoints: Optional[torch.IntTensor] = None
+ scores: Optional[torch.FloatTensor] = None
+ descriptors: Optional[torch.FloatTensor] = None
+ mask: Optional[torch.BoolTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
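+
+ # Illustrative note, not part of the original class: since keypoints are padded to
+ # the longest image in the batch, per-image results are recovered by indexing with
+ # the mask, e.g.
+ #
+ #     valid = outputs.mask[i].bool()
+ #     image_keypoints = outputs.keypoints[i][valid]
+ #     image_scores = outputs.scores[i][valid]
+ #     image_descriptors = outputs.descriptors[i][valid]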
+
+
+class SuperPointConvBlock(nn.Module):
+ def __init__(
+ self, config: SuperPointConfig, in_channels: int, out_channels: int, add_pooling: bool = False
+ ) -> None:
+ super().__init__()
+ self.conv_a = nn.Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ )
+ self.conv_b = nn.Conv2d(
+ out_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ )
+ self.relu = nn.ReLU(inplace=True)
+ self.pool = nn.MaxPool2d(kernel_size=2, stride=2) if add_pooling else None
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.relu(self.conv_a(hidden_states))
+ hidden_states = self.relu(self.conv_b(hidden_states))
+ if self.pool is not None:
+ hidden_states = self.pool(hidden_states)
+ return hidden_states
+
+
+class SuperPointEncoder(nn.Module):
+ """
+ SuperPoint encoder module. It is made of 4 convolutional blocks, each with two convolutional layers and ReLU
+ activations; the first three blocks use max pooling, reducing the spatial resolution of the image by a factor of 8.
+ """
+
+ def __init__(self, config: SuperPointConfig) -> None:
+ super().__init__()
+ # SuperPoint uses 1 channel images
+ self.input_dim = 1
+
+ conv_blocks = []
+ conv_blocks.append(
+ SuperPointConvBlock(config, self.input_dim, config.encoder_hidden_sizes[0], add_pooling=True)
+ )
+ for i in range(1, len(config.encoder_hidden_sizes) - 1):
+ conv_blocks.append(
+ SuperPointConvBlock(
+ config, config.encoder_hidden_sizes[i - 1], config.encoder_hidden_sizes[i], add_pooling=True
+ )
+ )
+ conv_blocks.append(
+ SuperPointConvBlock(
+ config, config.encoder_hidden_sizes[-2], config.encoder_hidden_sizes[-1], add_pooling=False
+ )
+ )
+ self.conv_blocks = nn.ModuleList(conv_blocks)
+
+ def forward(
+ self,
+ input,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
+ all_hidden_states = () if output_hidden_states else None
+
+ for conv_block in self.conv_blocks:
+ input = conv_block(input)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (input,)
+ output = input
+ if not return_dict:
+ return tuple(v for v in [output, all_hidden_states] if v is not None)
+
+ return BaseModelOutputWithNoAttention(
+ last_hidden_state=output,
+ hidden_states=all_hidden_states,
+ )
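+
+
+ # Illustrative sketch, not part of the original file: with the default configuration,
+ # the first three conv blocks each halve the spatial resolution, so the encoder maps a
+ # (batch, 1, H, W) grayscale input to a (batch, 128, H / 8, W / 8) feature map.
+ def _encoder_output_shape_sketch() -> torch.Size:
+     config = SuperPointConfig()
+     encoder = SuperPointEncoder(config)
+     pixel_values = torch.zeros(1, 1, 480, 640)
+     return encoder(pixel_values).last_hidden_state.shape  # torch.Size([1, 128, 60, 80])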
+
+
+class SuperPointInterestPointDecoder(nn.Module):
+ """
+ The SuperPointInterestPointDecoder uses the output of the SuperPointEncoder to compute the keypoints with their scores.
+ The scores are first computed by a convolutional layer, then a softmax is applied to get a probability distribution
+ over the 65 possible keypoint classes. The keypoints are then extracted from the scores by thresholding and
+ non-maximum suppression. Post-processing is then applied to remove keypoints too close to the image borders as well
+ as to keep only the k keypoints with highest score.
+ """
+
+ def __init__(self, config: SuperPointConfig) -> None:
+ super().__init__()
+ self.keypoint_threshold = config.keypoint_threshold
+ self.max_keypoints = config.max_keypoints
+ self.nms_radius = config.nms_radius
+ self.border_removal_distance = config.border_removal_distance
+
+ self.relu = nn.ReLU(inplace=True)
+ self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
+ self.conv_score_a = nn.Conv2d(
+ config.encoder_hidden_sizes[-1],
+ config.decoder_hidden_size,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ )
+ self.conv_score_b = nn.Conv2d(
+ config.decoder_hidden_size, config.keypoint_decoder_dim, kernel_size=1, stride=1, padding=0
+ )
+
+ def forward(self, encoded: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ scores = self._get_pixel_scores(encoded)
+ keypoints, scores = self._extract_keypoints(scores)
+
+ return keypoints, scores
+
+ def _get_pixel_scores(self, encoded: torch.Tensor) -> torch.Tensor:
+ """Based on the encoder output, compute the scores for each pixel of the image"""
+ scores = self.relu(self.conv_score_a(encoded))
+ scores = self.conv_score_b(scores)
+ scores = nn.functional.softmax(scores, 1)[:, :-1]
+ batch_size, _, height, width = scores.shape
+ scores = scores.permute(0, 2, 3, 1).reshape(batch_size, height, width, 8, 8)
+ scores = scores.permute(0, 1, 3, 2, 4).reshape(batch_size, height * 8, width * 8)
+ scores = simple_nms(scores, self.nms_radius)
+ return scores
+
+ def _extract_keypoints(self, scores: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Based on their scores, extract the pixels that represent the keypoints that will be used for descriptors computation"""
+ _, height, width = scores.shape
+
+ # Threshold keypoints by score value
+ keypoints = torch.nonzero(scores[0] > self.keypoint_threshold)
+ scores = scores[0][tuple(keypoints.t())]
+
+ # Discard keypoints near the image borders
+ keypoints, scores = remove_keypoints_from_borders(
+ keypoints, scores, self.border_removal_distance, height * 8, width * 8
+ )
+
+ # Keep the k keypoints with highest score
+ if self.max_keypoints >= 0:
+ keypoints, scores = top_k_keypoints(keypoints, scores, self.max_keypoints)
+
+ # Convert (y, x) to (x, y)
+ keypoints = torch.flip(keypoints, [1]).float()
+
+ return keypoints, scores
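+
+
+ # Illustrative sketch, not part of the original file: `_get_pixel_scores` drops the last
+ # of the 65 bins (the "no keypoint" dustbin of the original SuperPoint paper) and unfolds
+ # the remaining 64 bins of every coarse cell back into an 8x8 patch of pixel scores,
+ # going from (batch, 64, H / 8, W / 8) to (batch, H, W).
+ def _unfold_cell_scores_sketch() -> torch.Tensor:
+     batch_size, height, width = 1, 4, 5  # a coarse grid of 4 x 5 cells
+     scores = torch.rand(batch_size, 64, height, width)
+     scores = scores.permute(0, 2, 3, 1).reshape(batch_size, height, width, 8, 8)
+     scores = scores.permute(0, 1, 3, 2, 4).reshape(batch_size, height * 8, width * 8)
+     return scores  # shape (1, 32, 40)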
+
+
+class SuperPointDescriptorDecoder(nn.Module):
+ """
+ The SuperPointDescriptorDecoder uses the outputs of both the SuperPointEncoder and the
+ SuperPointInterestPointDecoder to compute the descriptors at the keypoints locations.
+
+ The descriptors are first computed by a convolutional layer, then normalized to have a norm of 1. The descriptors
+ are then interpolated at the keypoints locations.
+ """
+
+ def __init__(self, config: SuperPointConfig) -> None:
+ super().__init__()
+
+ self.relu = nn.ReLU(inplace=True)
+ self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
+ self.conv_descriptor_a = nn.Conv2d(
+ config.encoder_hidden_sizes[-1],
+ config.decoder_hidden_size,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ )
+ self.conv_descriptor_b = nn.Conv2d(
+ config.decoder_hidden_size,
+ config.descriptor_decoder_dim,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ )
+
+ def forward(self, encoded: torch.Tensor, keypoints: torch.Tensor) -> torch.Tensor:
+ """Based on the encoder output and the keypoints, compute the descriptors for each keypoint"""
+ descriptors = self.conv_descriptor_b(self.relu(self.conv_descriptor_a(encoded)))
+ descriptors = nn.functional.normalize(descriptors, p=2, dim=1)
+
+ descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0]
+
+ # [descriptor_dim, num_keypoints] -> [num_keypoints, descriptor_dim]
+ descriptors = torch.transpose(descriptors, 0, 1)
+
+ return descriptors
+
+ @staticmethod
+ def _sample_descriptors(keypoints, descriptors, scale: int = 8) -> torch.Tensor:
+ """Interpolate descriptors at keypoint locations"""
+ batch_size, num_channels, height, width = descriptors.shape
+ keypoints = keypoints - scale / 2 + 0.5
+ divisor = torch.tensor([[(width * scale - scale / 2 - 0.5), (height * scale - scale / 2 - 0.5)]])
+ divisor = divisor.to(keypoints)
+ keypoints /= divisor
+ keypoints = keypoints * 2 - 1 # normalize to (-1, 1)
+ kwargs = {"align_corners": True} if is_torch_greater_or_equal_than_1_13 else {}
+ # [batch_size, num_keypoints, 2] -> [batch_size, 1, num_keypoints, 2]
+ keypoints = keypoints.view(batch_size, 1, -1, 2)
+ descriptors = nn.functional.grid_sample(descriptors, keypoints, mode="bilinear", **kwargs)
+ # [batch_size, descriptor_dim, 1, num_keypoints] -> [batch_size, descriptor_dim, num_keypoints]
+ descriptors = descriptors.reshape(batch_size, num_channels, -1)
+ descriptors = nn.functional.normalize(descriptors, p=2, dim=1)
+ return descriptors
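+
+
+ # Illustrative sketch, not part of the original file: `_sample_descriptors` offsets the
+ # pixel coordinates by half a cell and rescales them to the [-1, 1] range expected by
+ # `grid_sample`, accounting for the descriptor map being `scale` (= 8) times smaller
+ # than the image.
+ def _normalize_keypoints_sketch() -> torch.Tensor:
+     scale, height, width = 8, 60, 80  # descriptor map for a 480 x 640 image
+     keypoints = torch.tensor([[[0.0, 0.0], [639.0, 479.0]]])  # (x, y) pixel coordinates
+     keypoints = keypoints - scale / 2 + 0.5
+     divisor = torch.tensor([[width * scale - scale / 2 - 0.5, height * scale - scale / 2 - 0.5]])
+     keypoints = keypoints / divisor
+     return keypoints * 2 - 1  # image corners map to roughly -1 and +1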
+
+
+class SuperPointPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = SuperPointConfig
+ base_model_prefix = "superpoint"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = False
+
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ def extract_one_channel_pixel_values(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor:
+ """
+ Assuming pixel_values has shape (batch_size, 3, height, width), and that all channel values are the same,
+ extract the first channel value to get a tensor of shape (batch_size, 1, height, width) for SuperPoint. This is
+ a workaround for the issue discussed in
+ https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
+
+ Args:
+ pixel_values: torch.FloatTensor of shape (batch_size, 3, height, width)
+
+ Returns:
+ pixel_values: torch.FloatTensor of shape (batch_size, 1, height, width)
+
+ """
+ return pixel_values[:, 0, :, :][:, None, :, :]
+
+
+SUPERPOINT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`SuperPointConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+SUPERPOINT_INPUTS_DOCSTRING = r"""
+Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`SuperPointImageProcessor`]. See
+ [`SuperPointImageProcessor.__call__`] for details.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more
+ detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+
+@add_start_docstrings(
+ "SuperPoint model outputting keypoints and descriptors.",
+ SUPERPOINT_START_DOCSTRING,
+)
+class SuperPointForKeypointDetection(SuperPointPreTrainedModel):
+ """
+ SuperPoint model. It consists of a SuperPointEncoder, a SuperPointInterestPointDecoder and a
+ SuperPointDescriptorDecoder. SuperPoint was proposed in `SuperPoint: Self-Supervised Interest Point Detection and
+ Description <https://arxiv.org/abs/1712.07629>`__ by Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. It
+ is a fully convolutional neural network that extracts keypoints and descriptors from an image. It is trained in a
+ self-supervised manner, using a combination of a photometric loss and a loss based on the homographic adaptation of
+ keypoints. It is made of a convolutional encoder and two decoders: one for keypoints and one for descriptors.
+ """
+
+ def __init__(self, config: SuperPointConfig) -> None:
+ super().__init__(config)
+
+ self.config = config
+
+ self.encoder = SuperPointEncoder(config)
+ self.keypoint_decoder = SuperPointInterestPointDecoder(config)
+ self.descriptor_decoder = SuperPointDescriptorDecoder(config)
+
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(SUPERPOINT_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ImagePointDescriptionOutput]:
+ """
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, SuperPointForKeypointDetection
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
+ >>> model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
+
+ >>> inputs = processor(image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ ```"""
+
+ if labels is not None:
+ raise ValueError(
+ f"SuperPoint is not trainable, no labels should be provided.Therefore, labels should be None but were {type(labels)}"
+ )
+
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ pixel_values = self.extract_one_channel_pixel_values(pixel_values)
+
+ batch_size = pixel_values.shape[0]
+
+ encoder_outputs = self.encoder(
+ pixel_values,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ list_keypoints_scores = [
+ self.keypoint_decoder(last_hidden_state[None, ...]) for last_hidden_state in last_hidden_state
+ ]
+
+ list_keypoints = [keypoints_scores[0] for keypoints_scores in list_keypoints_scores]
+ list_scores = [keypoints_scores[1] for keypoints_scores in list_keypoints_scores]
+
+ list_descriptors = [
+ self.descriptor_decoder(last_hidden_state[None, ...], keypoints[None, ...])
+ for last_hidden_state, keypoints in zip(last_hidden_state, list_keypoints)
+ ]
+
+ maximum_num_keypoints = max(keypoints.shape[0] for keypoints in list_keypoints)
+
+ keypoints = torch.zeros((batch_size, maximum_num_keypoints, 2), device=pixel_values.device)
+ scores = torch.zeros((batch_size, maximum_num_keypoints), device=pixel_values.device)
+ descriptors = torch.zeros(
+ (batch_size, maximum_num_keypoints, self.config.descriptor_decoder_dim),
+ device=pixel_values.device,
+ )
+ mask = torch.zeros((batch_size, maximum_num_keypoints), device=pixel_values.device, dtype=torch.int)
+
+ for i, (_keypoints, _scores, _descriptors) in enumerate(zip(list_keypoints, list_scores, list_descriptors)):
+ keypoints[i, : _keypoints.shape[0]] = _keypoints
+ scores[i, : _scores.shape[0]] = _scores
+ descriptors[i, : _descriptors.shape[0]] = _descriptors
+ mask[i, : _scores.shape[0]] = 1
+
+ hidden_states = encoder_outputs[1] if output_hidden_states else None
+ if not return_dict:
+ return tuple(
+ v for v in [last_hidden_state, keypoints, scores, descriptors, mask, hidden_states] if v is not None
+ )
+
+ return ImagePointDescriptionOutput(
+ last_hidden_state=last_hidden_state,
+ keypoints=keypoints,
+ scores=scores,
+ descriptors=descriptors,
+ mask=mask,
+ hidden_states=hidden_states,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1f89d73ac47c588d20ba9eccc6186af0f01781e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {"configuration_yoso": ["YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP", "YosoConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_yoso"] = [
+ "YOSO_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "YosoForMaskedLM",
+ "YosoForMultipleChoice",
+ "YosoForQuestionAnswering",
+ "YosoForSequenceClassification",
+ "YosoForTokenClassification",
+ "YosoLayer",
+ "YosoModel",
+ "YosoPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_yoso import YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP, YosoConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_yoso import (
+ YOSO_PRETRAINED_MODEL_ARCHIVE_LIST,
+ YosoForMaskedLM,
+ YosoForMultipleChoice,
+ YosoForQuestionAnswering,
+ YosoForSequenceClassification,
+ YosoForTokenClassification,
+ YosoLayer,
+ YosoModel,
+ YosoPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34c6115c6396af1d6df486b6840c877faab7b7f4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ad3da2c187e2a33aa518b2cc17768dc62c4e068
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/configuration_yoso.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/convert_yoso_pytorch_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/convert_yoso_pytorch_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5d39688a02e06c2ca7f3d1c3eb51d56224525d4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/convert_yoso_pytorch_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/modeling_yoso.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/modeling_yoso.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..924a8e72f78ae6793bb1c4691e5c7e9bb14bd01d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/__pycache__/modeling_yoso.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/configuration_yoso.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/configuration_yoso.py
new file mode 100644
index 0000000000000000000000000000000000000000..02d7f44d3cf2a08fa1bbd59af1c768fd7ed2dfa1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/configuration_yoso.py
@@ -0,0 +1,146 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" YOSO model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "uw-madison/yoso-4096": "https://huggingface.co/uw-madison/yoso-4096/resolve/main/config.json",
+ # See all YOSO models at https://huggingface.co/models?filter=yoso
+}
+
+
+class YosoConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`YosoModel`]. It is used to instantiate a YOSO
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the YOSO
+ [uw-madison/yoso-4096](https://huggingface.co/uw-madison/yoso-4096) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50265):
+ Vocabulary size of the YOSO model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`YosoModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimension of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 1):
+ The vocabulary size of the `token_type_ids` passed when calling [`YosoModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`.
+ use_expectation (`bool`, *optional*, defaults to `True`):
+ Whether or not to use YOSO Expectation. Overrides any effect of `num_hash`.
+ hash_code_len (`int`, *optional*, defaults to 9):
+ The length of hashes generated by the hash functions.
+ num_hash (`int`, *optional*, defaults to 64):
+ Number of hash functions used in [`YosoSelfAttention`].
+ conv_window (`int`, *optional*):
+ Kernel size of depth-wise convolution.
+ use_fast_hash (`bool`, *optional*, defaults to `True`):
+ Whether or not to use custom CUDA kernels that perform fast random projection via the Hadamard transform.
+ lsh_backward (`bool`, *optional*, defaults to `True`):
+ Whether or not to perform backpropagation using Locality Sensitive Hashing.
+
+ Example:
+
+ ```python
+ >>> from transformers import YosoConfig, YosoModel
+
+ >>> # Initializing a YOSO uw-madison/yoso-4096 style configuration
+ >>> configuration = YosoConfig()
+
+ >>> # Initializing a model (with random weights) from the uw-madison/yoso-4096 style configuration
+ >>> model = YosoModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "yoso"
+
+ def __init__(
+ self,
+ vocab_size=50265,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=4096,
+ type_vocab_size=1,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ position_embedding_type="absolute",
+ use_expectation=True,
+ hash_code_len=9,
+ num_hash=64,
+ conv_window=None,
+ use_fast_hash=True,
+ lsh_backward=True,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.type_vocab_size = type_vocab_size
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_expectation = use_expectation
+ self.hash_code_len = hash_code_len
+ self.num_hash = num_hash
+ self.conv_window = conv_window
+ self.use_fast_hash = use_fast_hash
+ self.lsh_backward = lsh_backward
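+
+# A minimal usage sketch (illustrative only, not part of the module): building a configuration that turns off the
+# expectation approximation and relies on LSH cumulation instead. The chosen values are assumptions for
+# demonstration, not recommended settings.
+#
+# >>> from transformers import YosoConfig
+# >>> config = YosoConfig(use_expectation=False, num_hash=32, hash_code_len=8, lsh_backward=True)
+# >>> config.use_fast_hash
+# True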
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..be46a4de81b30cff5c826bd9f298b2ee7a8fecbb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/convert_yoso_pytorch_to_pytorch.py
@@ -0,0 +1,108 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert YOSO checkpoints from the original repository. URL: https://github.com/mlpen/YOSO"""
+
+import argparse
+
+import torch
+
+from transformers import YosoConfig, YosoForMaskedLM
+
+
+def rename_key(orig_key):
+ if "model" in orig_key:
+ orig_key = orig_key.replace("model.", "")
+ if "norm1" in orig_key:
+ orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
+ if "norm2" in orig_key:
+ orig_key = orig_key.replace("norm2", "output.LayerNorm")
+ if "norm" in orig_key:
+ orig_key = orig_key.replace("norm", "LayerNorm")
+ if "transformer" in orig_key:
+ layer_num = orig_key.split(".")[0].split("_")[-1]
+ orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
+ if "mha.attn" in orig_key:
+ orig_key = orig_key.replace("mha.attn", "attention.self")
+ if "mha" in orig_key:
+ orig_key = orig_key.replace("mha", "attention")
+ if "W_q" in orig_key:
+ orig_key = orig_key.replace("W_q", "self.query")
+ if "W_k" in orig_key:
+ orig_key = orig_key.replace("W_k", "self.key")
+ if "W_v" in orig_key:
+ orig_key = orig_key.replace("W_v", "self.value")
+ if "ff1" in orig_key:
+ orig_key = orig_key.replace("ff1", "intermediate.dense")
+ if "ff2" in orig_key:
+ orig_key = orig_key.replace("ff2", "output.dense")
+ if "ff" in orig_key:
+ orig_key = orig_key.replace("ff", "output.dense")
+ if "mlm_class" in orig_key:
+ orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
+ if "mlm" in orig_key:
+ orig_key = orig_key.replace("mlm", "cls.predictions.transform")
+ if "cls" not in orig_key:
+ orig_key = "yoso." + orig_key
+
+ return orig_key
+
+
+def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if ("pooler" in key) or ("sen_class" in key):
+ continue
+ else:
+ orig_state_dict[rename_key(key)] = val
+
+ orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
+ orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
+
+ return orig_state_dict
+
+
+def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
+ orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
+ config = YosoConfig.from_json_file(yoso_config_file)
+ model = YosoForMaskedLM(config)
+
+ new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
+
+ print(model.load_state_dict(new_state_dict))
+ model.eval()
+ model.save_pretrained(pytorch_dump_path)
+
+ print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
+ )
+ parser.add_argument(
+ "--config_file",
+ default=None,
+ type=str,
+ required=True,
+ help="The json file for YOSO model config.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
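+
+# Example invocation (the paths below are placeholders, not files shipped with this script):
+#
+#   python convert_yoso_pytorch_to_pytorch.py \
+#       --pytorch_model_path /path/to/yoso_checkpoint.pt \
+#       --config_file /path/to/yoso_config.json \
+#       --pytorch_dump_path /path/to/pytorch_dump_dir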
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/modeling_yoso.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/modeling_yoso.py
new file mode 100644
index 0000000000000000000000000000000000000000..41e34a6c66c42bd54d3711536996b4abd03c5be1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/yoso/modeling_yoso.py
@@ -0,0 +1,1309 @@
+# coding=utf-8
+# Copyright 2022 University of Wisconsin-Madison and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch YOSO model."""
+
+
+import math
+from pathlib import Path
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithCrossAttentions,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_ninja_available,
+ is_torch_cuda_available,
+ logging,
+)
+from .configuration_yoso import YosoConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "uw-madison/yoso-4096"
+_CONFIG_FOR_DOC = "YosoConfig"
+
+YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "uw-madison/yoso-4096",
+ # See all YOSO models at https://huggingface.co/models?filter=yoso
+]
+
+lsh_cumulation = None
+
+
+def load_cuda_kernels():
+ global lsh_cumulation
+ from torch.utils.cpp_extension import load
+
+ def append_root(files):
+ src_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "yoso"
+ return [src_folder / file for file in files]
+
+ src_files = append_root(["fast_lsh_cumulation_torch.cpp", "fast_lsh_cumulation.cu", "fast_lsh_cumulation_cuda.cu"])
+
+ load("fast_lsh_cumulation", src_files, verbose=True)
+
+ import fast_lsh_cumulation as lsh_cumulation
+
+
+def to_contiguous(input_tensors):
+ if isinstance(input_tensors, list):
+ out = []
+ for tensor in input_tensors:
+ if not tensor.is_contiguous():
+ tensor = tensor.contiguous()
+ out.append(tensor)
+ return out
+ else:
+ if not input_tensors.is_contiguous():
+ input_tensors = input_tensors.contiguous()
+ return input_tensors
+
+
+def normalize(input_tensors):
+ if isinstance(input_tensors, list):
+ out = []
+ for tensor in input_tensors:
+ out.append(nn.functional.normalize(tensor, p=2, dim=-1))
+ return out
+ else:
+ return nn.functional.normalize(input_tensors, p=2, dim=-1)
+
+
+def hashing(query, key, num_hash, hash_len):
+ if len(query.size()) != 3:
+ raise ValueError("Query has incorrect size.")
+ if len(key.size()) != 3:
+ raise ValueError("Key has incorrect size.")
+
+ rmat = torch.randn(query.size(0), query.size(2), num_hash * hash_len, device=query.device)
+ raise_pow = 2 ** torch.arange(hash_len, device=query.device)
+
+ query_projection = torch.matmul(query, rmat).reshape(query.size(0), query.size(1), num_hash, hash_len)
+ key_projection = torch.matmul(key, rmat).reshape(key.size(0), key.size(1), num_hash, hash_len)
+ query_binary = (query_projection > 0).int()
+ key_binary = (key_projection > 0).int()
+ query_hash = torch.sum(query_binary * raise_pow, dim=-1)
+ key_hash = torch.sum(key_binary * raise_pow, dim=-1)
+
+ return query_hash.int(), key_hash.int()
+
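+# Worked example of the hash-code packing above (illustrative, not executed by the model): with hash_len = 3,
+# a projection whose signs are (+, -, +) becomes the binary vector (1, 0, 1); summing it against
+# raise_pow = (1, 2, 4) packs it into the integer code 1*1 + 0*2 + 1*4 = 5. Each of the num_hash hash functions
+# therefore maps every query/key vector to an integer in [0, 2**hash_len).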
+
+class YosoCumulation(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, query_mask, key_mask, query, key, value, config):
+ hash_code_len = config["hash_code_len"]
+
+ expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len
+ expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :]
+ cumulation_value = torch.matmul(expectation, value)
+
+ ctx.save_for_backward(query_mask, key_mask, expectation, query, key, value)
+ ctx.config = config
+
+ return cumulation_value
+
+ @staticmethod
+ def backward(ctx, grad):
+ grad = to_contiguous(grad)
+
+ query_mask, key_mask, expectation, query, key, value = ctx.saved_tensors
+ config = ctx.config
+
+ hash_code_len = config["hash_code_len"]
+
+ weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation
+ grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key)
+ grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query)
+ grad_value = torch.matmul(expectation.transpose(-1, -2), grad)
+
+ return None, None, grad_query, grad_key, grad_value, None
+
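+# Note on the expectation used above: for unit-normalized query and key vectors, a single random-hyperplane hash
+# bit agrees on q and k with probability 1 - arccos(q . k) / pi, so a full code of hash_code_len bits collides
+# with probability (1 - arccos(q . k) / pi) ** hash_code_len. YosoCumulation evaluates this closed form directly,
+# while YosoLSHCumulation below estimates the same quantity by counting actual LSH collisions.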
+
+class YosoLSHCumulation(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, query_mask, key_mask, query, key, value, config):
+ if query_mask.size(0) != key_mask.size(0):
+ raise ValueError("Query mask and Key mask differ in sizes in dimension 0")
+ if query_mask.size(0) != query.size(0):
+ raise ValueError("Query mask and Query differ in sizes in dimension 0")
+ if query_mask.size(0) != key.size(0):
+ raise ValueError("Query mask and Key differ in sizes in dimension 0")
+ if query_mask.size(0) != value.size(0):
+ raise ValueError("Query mask and Value mask differ in sizes in dimension 0")
+ if key.size(1) != value.size(1):
+ raise ValueError("Key and Value differ in sizes in dimension 1")
+ if query.size(2) != key.size(2):
+ raise ValueError("Query and Key differ in sizes in dimension 2")
+
+ query_mask, key_mask, query, key, value = to_contiguous([query_mask, key_mask, query, key, value])
+
+ use_cuda = query_mask.is_cuda
+ num_hash = config["num_hash"]
+ hash_code_len = config["hash_code_len"]
+ hashtable_capacity = int(2**hash_code_len)
+
+ if config["use_fast_hash"]:
+ query_hash_code, key_hash_code = lsh_cumulation.fast_hash(
+ query_mask, query, key_mask, key, num_hash, hash_code_len, use_cuda, 1
+ )
+ else:
+ query_hash_code, key_hash_code = hashing(query, key, num_hash, hash_code_len)
+
+ cumulation_value = lsh_cumulation.lsh_cumulation(
+ query_mask, query_hash_code, key_mask, key_hash_code, value, hashtable_capacity, use_cuda, 1
+ )
+
+ ctx.save_for_backward(query_mask, key_mask, query_hash_code, key_hash_code, query, key, value)
+ ctx.config = config
+
+ return cumulation_value
+
+ @staticmethod
+ def backward(ctx, grad):
+ grad = to_contiguous(grad)
+
+ query_mask, key_mask, query_hash_code, key_hash_code, query, key, value = ctx.saved_tensors
+ config = ctx.config
+
+ use_cuda = grad.is_cuda
+ hash_code_len = config["hash_code_len"]
+ hashtable_capacity = int(2**hash_code_len)
+
+ if config["lsh_backward"]:
+ grad_value = lsh_cumulation.lsh_cumulation(
+ key_mask, key_hash_code, query_mask, query_hash_code, grad, hashtable_capacity, use_cuda, 1
+ )
+ grad_query = lsh_cumulation.lsh_weighted_cumulation(
+ query_mask,
+ query_hash_code,
+ grad,
+ key_mask,
+ key_hash_code,
+ value,
+ (hash_code_len / 2) * key,
+ hashtable_capacity,
+ use_cuda,
+ 4,
+ )
+ grad_key = lsh_cumulation.lsh_weighted_cumulation(
+ key_mask,
+ key_hash_code,
+ value,
+ query_mask,
+ query_hash_code,
+ grad,
+ (hash_code_len / 2) * query,
+ hashtable_capacity,
+ use_cuda,
+ 4,
+ )
+ else:
+ expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len
+ expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :]
+ weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation
+ grad_query = torch.matmul(weighted_exp, (hash_code_len / 2) * key)
+ grad_key = torch.matmul(weighted_exp.transpose(-1, -2), (hash_code_len / 2) * query)
+ grad_value = torch.matmul(expectation.transpose(-1, -2), grad)
+
+ return None, None, grad_query, grad_key, grad_value, None
+
+
+# Copied from transformers.models.nystromformer.modeling_nystromformer.NystromformerEmbeddings
+class YosoEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False
+ )
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "token_type_ids",
+ torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
+ persistent=False,
+ )
+
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ # If token_type_ids was not passed (which usually happens when it is auto-generated), default to the
+ # all-zeros buffer registered in the constructor. This lets users trace the model without passing
+ # token_type_ids and solves issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class YosoSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+ kernel_loaded = lsh_cumulation is not None
+ if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded:
+ try:
+ load_cuda_kernels()
+ except Exception as e:
+ logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}")
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = (
+ position_embedding_type if position_embedding_type is not None else config.position_embedding_type
+ )
+
+ self.use_expectation = config.use_expectation
+ self.hash_code_len = config.hash_code_len
+ self.use_conv = config.conv_window is not None
+ self.use_fast_hash = config.use_fast_hash
+ self.num_hash = config.num_hash
+ self.lsh_backward = config.lsh_backward
+
+ self.lsh_config = {
+ "hash_code_len": self.hash_code_len,
+ "use_fast_hash": self.use_fast_hash,
+ "num_hash": self.num_hash,
+ "lsh_backward": self.lsh_backward,
+ }
+
+ if config.conv_window is not None:
+ self.conv = nn.Conv2d(
+ in_channels=config.num_attention_heads,
+ out_channels=config.num_attention_heads,
+ kernel_size=(config.conv_window, 1),
+ padding=(config.conv_window // 2, 0),
+ bias=False,
+ groups=config.num_attention_heads,
+ )
+
+ def transpose_for_scores(self, layer):
+ new_layer_shape = layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ layer = layer.view(*new_layer_shape)
+ return layer.permute(0, 2, 1, 3)
+
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ if self.use_conv:
+ conv_value_layer = self.conv(value_layer * attention_mask[:, None, :, None])
+
+ batch_size, num_heads, seq_len, head_dim = query_layer.size()
+
+ query_layer = query_layer.reshape(batch_size * num_heads, seq_len, head_dim)
+ key_layer = key_layer.reshape(batch_size * num_heads, seq_len, head_dim)
+ value_layer = value_layer.reshape(batch_size * num_heads, seq_len, head_dim)
+
+ attention_mask = 1.0 + attention_mask / 10000.0
+ attention_mask = (
+ attention_mask.unsqueeze(1)
+ .repeat_interleave(num_heads, dim=1)
+ .reshape(batch_size * num_heads, seq_len)
+ .int()
+ )
+
+ # The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs
+ # smaller than this are padded with zeros.
+ gpu_warp_size = 32
+
+ if (not self.use_expectation) and head_dim < gpu_warp_size:
+ pad_size = batch_size * num_heads, seq_len, gpu_warp_size - head_dim
+
+ query_layer = torch.cat(
+ [
+ query_layer,
+ torch.zeros(pad_size, device=query_layer.device),
+ ],
+ dim=-1,
+ )
+ key_layer = torch.cat(
+ [
+ key_layer,
+ torch.zeros(pad_size, device=key_layer.device),
+ ],
+ dim=-1,
+ )
+ value_layer = torch.cat(
+ [
+ value_layer,
+ torch.zeros(pad_size, device=value_layer.device),
+ ],
+ dim=-1,
+ )
+
+ if self.use_expectation or self.training:
+ query_layer, key_layer = normalize([query_layer, key_layer])
+
+ if self.use_expectation:
+ context_layer = YosoCumulation.apply(
+ attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config
+ )
+ else:
+ context_layer = YosoLSHCumulation.apply(
+ attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config
+ )
+
+ if (not self.use_expectation) and head_dim < gpu_warp_size:
+ context_layer = context_layer[:, :, :head_dim]
+
+ context_layer = normalize(context_layer)
+
+ context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim)
+
+ if self.use_conv:
+ context_layer += conv_value_layer
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
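+ # Attention probabilities are not materialized by the expectation/LSH kernels, so the context layer is
+ # returned in the attentions slot when output_attentions is requested.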
+ outputs = (context_layer, context_layer) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
+class YosoSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class YosoAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = YosoSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = YosoSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ self_outputs = self.self(hidden_states, attention_mask, output_attentions)
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate
+class YosoIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput
+class YosoOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class YosoLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = YosoAttention(config)
+ self.add_cross_attention = config.add_cross_attention
+ self.intermediate = YosoIntermediate(config)
+ self.output = YosoOutput(config)
+
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
+ attention_output = self_attention_outputs[0]
+
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class YosoEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([YosoLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, attention_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutputWithCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform
+class YosoPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Yoso
+class YosoLMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = YosoPredictionHeadTransform(config)
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Yoso
+class YosoOnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = YosoLMPredictionHead(config)
+
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+class YosoPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = YosoConfig
+ base_model_prefix = "yoso"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+YOSO_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`YosoConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+YOSO_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare YOSO Model transformer outputting raw hidden-states without any specific head on top.",
+ YOSO_START_DOCSTRING,
+)
+class YosoModel(YosoPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = YosoEmbeddings(config)
+ self.encoder = YosoEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones((batch_size, seq_length), device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutputWithCrossAttentions(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
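+# A minimal inference sketch mirroring the auto-generated code samples (the checkpoint name comes from
+# _CHECKPOINT_FOR_DOC above; the input sentence is an arbitrary example):
+#
+# >>> from transformers import AutoTokenizer, YosoModel
+# >>> tokenizer = AutoTokenizer.from_pretrained("uw-madison/yoso-4096")
+# >>> model = YosoModel.from_pretrained("uw-madison/yoso-4096")
+# >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+# >>> outputs = model(**inputs)
+# >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)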
+
+@add_start_docstrings("""YOSO Model with a `language modeling` head on top.""", YOSO_START_DOCSTRING)
+class YosoForMaskedLM(YosoPreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.yoso = YosoModel(config)
+ self.cls = YosoOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.yoso(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class YosoClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
+
+ self.config = config
+
+ def forward(self, features, **kwargs):
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
+ x = self.dropout(x)
+ x = self.dense(x)
+ x = ACT2FN[self.config.hidden_act](x)
+ x = self.dropout(x)
+ x = self.out_proj(x)
+ return x
+
+
+@add_start_docstrings(
+ """YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of
+ the pooled output) e.g. for GLUE tasks.""",
+ YOSO_START_DOCSTRING,
+)
+class YosoForSequenceClassification(YosoPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.yoso = YosoModel(config)
+ self.classifier = YosoClassificationHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.yoso(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """YOSO Model with a multiple choice classification head on top (a linear layer on top of
+ the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""",
+ YOSO_START_DOCSTRING,
+)
+class YosoForMultipleChoice(YosoPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.yoso = YosoModel(config)
+ self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.yoso(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
+ pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
+ logits = self.classifier(pooled_output)
+
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """YOSO Model with a token classification head on top (a linear layer on top of
+ the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""",
+ YOSO_START_DOCSTRING,
+)
+class YosoForTokenClassification(YosoPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.yoso = YosoModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.yoso(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ # Only keep active parts of the loss
+ if attention_mask is not None:
+ active_loss = attention_mask.view(-1) == 1
+ active_logits = logits.view(-1, self.num_labels)
+ active_labels = torch.where(
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
+ )
+ loss = loss_fct(active_logits, active_labels)
+ else:
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """YOSO Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""",
+ YOSO_START_DOCSTRING,
+)
+class YosoForQuestionAnswering(YosoPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ config.num_labels = 2
+ self.num_labels = config.num_labels
+
+ self.yoso = YosoModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(YOSO_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.yoso(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = end_logits.squeeze(-1)
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, splitting adds a dimension, so squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # Sometimes the start/end positions are outside our model inputs; we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )