diff --git a/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..61d0b16f8595011d1d76f9846b36e332882e0d3d
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c58d9a4ba78bf6b0804db683329e66eb8ae93b01ff31f4f020feb4fad4514ee
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9826506a66ff12a6f292cccf94a14fad257f994d
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce326dc8c6d418331420b423287b45bfa55d60a768a3d87f44baf2125a288035
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2b6751d00a6532a1e9fd5f08906d9a64079f9a66
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/13.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:058ce3d1d39eba8b80c35b26ea61854f1330e05b249b0e8edb3e8a41ed44893d
+size 33555533
diff --git a/venv/lib/python3.10/site-packages/transformers/models/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..292a264644be85f9d7350756415c37d062fdb159
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/__init__.py
@@ -0,0 +1,273 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import (
+ albert,
+ align,
+ altclip,
+ audio_spectrogram_transformer,
+ auto,
+ autoformer,
+ bark,
+ bart,
+ barthez,
+ bartpho,
+ beit,
+ bert,
+ bert_generation,
+ bert_japanese,
+ bertweet,
+ big_bird,
+ bigbird_pegasus,
+ biogpt,
+ bit,
+ blenderbot,
+ blenderbot_small,
+ blip,
+ blip_2,
+ bloom,
+ bridgetower,
+ bros,
+ byt5,
+ camembert,
+ canine,
+ chinese_clip,
+ clap,
+ clip,
+ clipseg,
+ clvp,
+ code_llama,
+ codegen,
+ cohere,
+ conditional_detr,
+ convbert,
+ convnext,
+ convnextv2,
+ cpm,
+ cpmant,
+ ctrl,
+ cvt,
+ data2vec,
+ dbrx,
+ deberta,
+ deberta_v2,
+ decision_transformer,
+ deformable_detr,
+ deit,
+ deprecated,
+ depth_anything,
+ deta,
+ detr,
+ dialogpt,
+ dinat,
+ dinov2,
+ distilbert,
+ dit,
+ donut,
+ dpr,
+ dpt,
+ efficientformer,
+ efficientnet,
+ electra,
+ encodec,
+ encoder_decoder,
+ ernie,
+ ernie_m,
+ esm,
+ falcon,
+ fastspeech2_conformer,
+ flaubert,
+ flava,
+ fnet,
+ focalnet,
+ fsmt,
+ funnel,
+ fuyu,
+ gemma,
+ git,
+ glpn,
+ gpt2,
+ gpt_bigcode,
+ gpt_neo,
+ gpt_neox,
+ gpt_neox_japanese,
+ gpt_sw3,
+ gptj,
+ gptsan_japanese,
+ graphormer,
+ grounding_dino,
+ groupvit,
+ herbert,
+ hubert,
+ ibert,
+ idefics,
+ idefics2,
+ imagegpt,
+ informer,
+ instructblip,
+ jamba,
+ jukebox,
+ kosmos2,
+ layoutlm,
+ layoutlmv2,
+ layoutlmv3,
+ layoutxlm,
+ led,
+ levit,
+ lilt,
+ llama,
+ llava,
+ llava_next,
+ longformer,
+ longt5,
+ luke,
+ lxmert,
+ m2m_100,
+ mamba,
+ marian,
+ markuplm,
+ mask2former,
+ maskformer,
+ mbart,
+ mbart50,
+ mega,
+ megatron_bert,
+ megatron_gpt2,
+ mgp_str,
+ mistral,
+ mixtral,
+ mluke,
+ mobilebert,
+ mobilenet_v1,
+ mobilenet_v2,
+ mobilevit,
+ mobilevitv2,
+ mpnet,
+ mpt,
+ mra,
+ mt5,
+ musicgen,
+ musicgen_melody,
+ mvp,
+ nat,
+ nezha,
+ nllb,
+ nllb_moe,
+ nougat,
+ nystromformer,
+ olmo,
+ oneformer,
+ openai,
+ opt,
+ owlv2,
+ owlvit,
+ patchtsmixer,
+ patchtst,
+ pegasus,
+ pegasus_x,
+ perceiver,
+ persimmon,
+ phi,
+ phobert,
+ pix2struct,
+ plbart,
+ poolformer,
+ pop2piano,
+ prophetnet,
+ pvt,
+ pvt_v2,
+ qdqbert,
+ qwen2,
+ qwen2_moe,
+ rag,
+ realm,
+ recurrent_gemma,
+ reformer,
+ regnet,
+ rembert,
+ resnet,
+ roberta,
+ roberta_prelayernorm,
+ roc_bert,
+ roformer,
+ rwkv,
+ sam,
+ seamless_m4t,
+ seamless_m4t_v2,
+ segformer,
+ seggpt,
+ sew,
+ sew_d,
+ siglip,
+ speech_encoder_decoder,
+ speech_to_text,
+ speech_to_text_2,
+ speecht5,
+ splinter,
+ squeezebert,
+ stablelm,
+ starcoder2,
+ superpoint,
+ swiftformer,
+ swin,
+ swin2sr,
+ swinv2,
+ switch_transformers,
+ t5,
+ table_transformer,
+ tapas,
+ time_series_transformer,
+ timesformer,
+ timm_backbone,
+ trocr,
+ tvlt,
+ tvp,
+ udop,
+ umt5,
+ unispeech,
+ unispeech_sat,
+ univnet,
+ upernet,
+ videomae,
+ vilt,
+ vipllava,
+ vision_encoder_decoder,
+ vision_text_dual_encoder,
+ visual_bert,
+ vit,
+ vit_hybrid,
+ vit_mae,
+ vit_msn,
+ vitdet,
+ vitmatte,
+ vits,
+ vivit,
+ wav2vec2,
+ wav2vec2_bert,
+ wav2vec2_conformer,
+ wav2vec2_phoneme,
+ wav2vec2_with_lm,
+ wavlm,
+ whisper,
+ x_clip,
+ xglm,
+ xlm,
+ xlm_prophetnet,
+ xlm_roberta,
+ xlm_roberta_xl,
+ xlnet,
+ xmod,
+ yolos,
+ yoso,
+)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/bit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc50659d9fa06820ebe1edc7b56ab3d5de4ef67b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bit/__init__.py
@@ -0,0 +1,73 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {"configuration_bit": ["BIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BitConfig", "BitOnnxConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_bit"] = [
+ "BIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "BitForImageClassification",
+ "BitModel",
+ "BitPreTrainedModel",
+ "BitBackbone",
+ ]
+
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_bit"] = ["BitImageProcessor"]
+
+
+if TYPE_CHECKING:
+ from .configuration_bit import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BitConfig, BitOnnxConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_bit import (
+ BIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ BitBackbone,
+ BitForImageClassification,
+ BitModel,
+ BitPreTrainedModel,
+ )
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_bit import BitImageProcessor
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
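The `_import_structure`/`_LazyModule` pattern above keeps `import transformers` cheap: the torch- and vision-dependent submodules are only imported when one of their attributes is first accessed, and they are skipped cleanly when an optional dependency is missing. A minimal sketch of that behaviour, assuming a working `transformers` install with torch available:

```python
# Sketch only; relies on the private _LazyModule machinery wired up above.
import transformers.models.bit as bit

# The package object has been replaced by a _LazyModule, so nothing heavy is loaded yet.
print(type(bit).__name__)     # _LazyModule

# First attribute access triggers the real import of configuration_bit.
config_cls = bit.BitConfig
print(config_cls.model_type)  # "bit"
```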
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..087c22fd4cb90da3f0c50765aaa3d119065f0419
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db8aa8153281bae2601ebd03e3a0f644a3abf136
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/configuration_bit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/convert_bit_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/convert_bit_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9181271314ebb9cf29cd0ebd327683aa21b34a90
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/convert_bit_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a75f5eedb5525b57019aa99865dbb4565c0534fe
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/image_processing_bit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07c44d63b9711f2a7e195c4bd9dcee755ad3c926
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bit/__pycache__/modeling_bit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/configuration_bit.py b/venv/lib/python3.10/site-packages/transformers/models/bit/configuration_bit.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ec6307421bfaab92825496e6c7464ff20793d7d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bit/configuration_bit.py
@@ -0,0 +1,136 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BiT model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class BitConfig(BackboneConfigMixin, PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`BitModel`]. It is used to instantiate a BiT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the BiT
+ [google/bit-50](https://huggingface.co/google/bit-50) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ embedding_size (`int`, *optional*, defaults to 64):
+ Dimensionality (hidden size) for the embedding layer.
+ hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
+ Dimensionality (hidden size) at each stage.
+ depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
+ Depth (number of layers) for each stage.
+ layer_type (`str`, *optional*, defaults to `"preactivation"`):
+            The layer to use. It can be either `"preactivation"` or `"bottleneck"`.
+ hidden_act (`str`, *optional*, defaults to `"relu"`):
+ The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
+ are supported.
+ global_padding (`str`, *optional*):
+ Padding strategy to use for the convolutional layers. Can be either `"valid"`, `"same"`, or `None`.
+ num_groups (`int`, *optional*, defaults to 32):
+ Number of groups used for the `BitGroupNormActivation` layers.
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
+ The drop path rate for the stochastic depth.
+ embedding_dynamic_padding (`bool`, *optional*, defaults to `False`):
+ Whether or not to make use of dynamic padding for the embedding layer.
+ output_stride (`int`, *optional*, defaults to 32):
+ The output stride of the model.
+ width_factor (`int`, *optional*, defaults to 1):
+ The width factor for the model.
+ out_features (`List[str]`, *optional*):
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ out_indices (`List[int]`, *optional*):
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+
+ Example:
+ ```python
+ >>> from transformers import BitConfig, BitModel
+
+ >>> # Initializing a BiT bit-50 style configuration
+ >>> configuration = BitConfig()
+
+ >>> # Initializing a model (with random weights) from the bit-50 style configuration
+ >>> model = BitModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```
+ """
+
+ model_type = "bit"
+ layer_types = ["preactivation", "bottleneck"]
+ supported_padding = ["SAME", "VALID"]
+
+ def __init__(
+ self,
+ num_channels=3,
+ embedding_size=64,
+ hidden_sizes=[256, 512, 1024, 2048],
+ depths=[3, 4, 6, 3],
+ layer_type="preactivation",
+ hidden_act="relu",
+ global_padding=None,
+ num_groups=32,
+ drop_path_rate=0.0,
+ embedding_dynamic_padding=False,
+ output_stride=32,
+ width_factor=1,
+ out_features=None,
+ out_indices=None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ if layer_type not in self.layer_types:
+ raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
+ if global_padding is not None:
+ if global_padding.upper() in self.supported_padding:
+ global_padding = global_padding.upper()
+ else:
+ raise ValueError(f"Padding strategy {global_padding} not supported")
+ self.num_channels = num_channels
+ self.embedding_size = embedding_size
+ self.hidden_sizes = hidden_sizes
+ self.depths = depths
+ self.layer_type = layer_type
+ self.hidden_act = hidden_act
+ self.global_padding = global_padding
+ self.num_groups = num_groups
+ self.drop_path_rate = drop_path_rate
+ self.embedding_dynamic_padding = embedding_dynamic_padding
+ self.output_stride = output_stride
+ self.width_factor = width_factor
+
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+ )
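As the `out_features`/`out_indices` docstring above describes, the two backbone arguments are kept aligned against `stage_names` by `get_aligned_output_features_output_indices`. A small illustration of that alignment with the default four-stage layout (a sketch, not an exhaustive spec):

```python
from transformers import BitConfig

# Ask for backbone outputs by stage name; the matching indices are derived automatically.
config = BitConfig(out_features=["stem", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stem', 'stage4']
print(config.out_indices)   # [0, 4]
```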
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/convert_bit_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/bit/convert_bit_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cc7f64107ce9ee3735dd4e10875c492626cf242
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bit/convert_bit_to_pytorch.py
@@ -0,0 +1,178 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert BiT checkpoints from the timm library."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+from timm import create_model
+from timm.data import resolve_data_config
+from timm.data.transforms_factory import create_transform
+
+from transformers import BitConfig, BitForImageClassification, BitImageProcessor
+from transformers.image_utils import PILImageResampling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_config(model_name):
+ repo_id = "huggingface/label-files"
+ filename = "imagenet-1k-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ label2id = {v: k for k, v in id2label.items()}
+
+ conv_layer = "std_conv" if "bit" in model_name else False
+
+ # note that when using BiT as backbone for ViT-hybrid checkpoints,
+ # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
+ # config.conv_layer = "std_conv_same"
+ config = BitConfig(
+ conv_layer=conv_layer,
+ num_labels=1000,
+ id2label=id2label,
+ label2id=label2id,
+ )
+
+ return config
+
+
+def rename_key(name):
+ if "stem.conv" in name:
+ name = name.replace("stem.conv", "bit.embedder.convolution")
+ if "blocks" in name:
+ name = name.replace("blocks", "layers")
+ if "head.fc" in name:
+ name = name.replace("head.fc", "classifier.1")
+ if name.startswith("norm"):
+ name = "bit." + name
+ if "bit" not in name and "classifier" not in name:
+ name = "bit.encoder." + name
+
+ return name
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+@torch.no_grad()
+def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
+ """
+ Copy/paste/tweak model's weights to our BiT structure.
+ """
+
+ # define default BiT configuration
+ config = get_config(model_name)
+
+ # load original model from timm
+ timm_model = create_model(model_name, pretrained=True)
+ timm_model.eval()
+
+ # load state_dict of original model
+ state_dict = timm_model.state_dict()
+ for key in state_dict.copy().keys():
+ val = state_dict.pop(key)
+ state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
+
+ # load HuggingFace model
+ model = BitForImageClassification(config)
+ model.eval()
+ model.load_state_dict(state_dict)
+
+ # create image processor
+ transform = create_transform(**resolve_data_config({}, model=timm_model))
+ timm_transforms = transform.transforms
+
+ pillow_resamplings = {
+ "bilinear": PILImageResampling.BILINEAR,
+ "bicubic": PILImageResampling.BICUBIC,
+ "nearest": PILImageResampling.NEAREST,
+ }
+
+ processor = BitImageProcessor(
+ do_resize=True,
+ size={"shortest_edge": timm_transforms[0].size},
+ resample=pillow_resamplings[timm_transforms[0].interpolation.value],
+ do_center_crop=True,
+ crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
+ do_normalize=True,
+ image_mean=timm_transforms[-1].mean.tolist(),
+ image_std=timm_transforms[-1].std.tolist(),
+ )
+
+ image = prepare_img()
+ timm_pixel_values = transform(image).unsqueeze(0)
+ pixel_values = processor(image, return_tensors="pt").pixel_values
+
+ # verify pixel values
+ assert torch.allclose(timm_pixel_values, pixel_values)
+
+ # verify logits
+ with torch.no_grad():
+ outputs = model(pixel_values)
+ logits = outputs.logits
+
+ print("Logits:", logits[0, :3])
+ print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
+ timm_logits = timm_model(pixel_values)
+ assert timm_logits.shape == outputs.logits.shape
+ assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
+ print("Looks ok!")
+
+ if pytorch_dump_folder_path is not None:
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ print(f"Pushing model {model_name} and processor to the hub")
+ model.push_to_hub(f"ybelkada/{model_name}")
+ processor.push_to_hub(f"ybelkada/{model_name}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--model_name",
+ default="resnetv2_50x1_bitm",
+ type=str,
+ help="Name of the BiT timm model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+ parser.add_argument(
+ "--push_to_hub",
+ action="store_true",
+ help="Whether to push the model to the hub.",
+ )
+
+ args = parser.parse_args()
+ convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
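The script can be invoked as, e.g., `python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm --pytorch_dump_folder_path ./bit-50`. Most of the conversion work is the key renaming in `rename_key`; a few illustrative mappings (the timm-style keys on the left are assumptions about the source checkpoint layout, the outputs follow from the function as written):

```python
# Assuming rename_key from the script above is in scope.
print(rename_key("stem.conv.weight"))                # bit.embedder.convolution.weight
print(rename_key("stages.0.blocks.0.conv1.weight"))  # bit.encoder.stages.0.layers.0.conv1.weight
print(rename_key("head.fc.weight"))                  # classifier.1.weight
```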
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/image_processing_bit.py b/venv/lib/python3.10/site-packages/transformers/models/bit/image_processing_bit.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d5c7a7594a495f79d69c5d1c9a924fa24a01ad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bit/image_processing_bit.py
@@ -0,0 +1,345 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for BiT."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ convert_to_rgb,
+ get_resize_output_image_size,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ OPENAI_CLIP_MEAN,
+ OPENAI_CLIP_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+if is_vision_available():
+ import PIL
+
+
+class BitImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a BiT image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+ `do_resize` in the `preprocess` method.
+        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
+ `preprocess` method.
+        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
+ method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+ the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+ method.
+        do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+        image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
+ Whether to convert the image to RGB.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: bool = True,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"shortest_edge": 224}
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
+
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
+ self.do_convert_rgb = do_convert_rgb
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_center_crop",
+ "crop_size",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "do_convert_rgb",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
+ resized to keep the input aspect ratio.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+                Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ default_to_square = True
+ if "shortest_edge" in size:
+ size = size["shortest_edge"]
+ default_to_square = False
+ elif "height" in size and "width" in size:
+ size = (size["height"], size["width"])
+ else:
+ raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
+
+ output_size = get_resize_output_image_size(
+ image,
+ size=size,
+ default_to_square=default_to_square,
+ input_data_format=input_data_format,
+ )
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: int = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: bool = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> PIL.Image.Image:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
+ the longest edge resized to keep the input aspect ratio.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
+ `True`.
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+ Whether to convert the image to RGB.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ size = get_size_dict(size, param_name="size", default_to_square=False)
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # PIL RGBA images are converted to RGB
+ if do_convert_rgb:
+ images = [convert_to_rgb(image) for image in images]
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_center_crop:
+ images = [
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
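With the defaults documented above (shortest-edge 224 resize, 224x224 center crop, 1/255 rescale, CLIP mean/std normalization), the processor turns arbitrary images into batched `pixel_values`. A minimal usage sketch, assuming torch and NumPy are installed:

```python
import numpy as np
from transformers import BitImageProcessor

processor = BitImageProcessor()  # defaults as documented above

# Any PIL.Image or uint8 array in [0, 255] works; a random image is used purely for illustration.
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
inputs = processor(images=image, return_tensors="pt")
print(inputs.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
```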
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bit/modeling_bit.py b/venv/lib/python3.10/site-packages/transformers/models/bit/modeling_bit.py
new file mode 100644
index 0000000000000000000000000000000000000000..27141a9009e540780373ff6b5ebc450883784335
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bit/modeling_bit.py
@@ -0,0 +1,898 @@
+# coding=utf-8
+# Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch BiT model. Also supports backbone for ViT hybrid."""
+
+import collections
+import math
+from typing import Optional, Tuple
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BackboneOutput,
+ BaseModelOutputWithNoAttention,
+ BaseModelOutputWithPoolingAndNoAttention,
+ ImageClassifierOutputWithNoAttention,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.backbone_utils import BackboneMixin
+from .configuration_bit import BitConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "BitConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "google/bit-50"
+_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "google/bit-50"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
+
+
+from ..deprecated._archive_maps import BIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[Tuple, bool]:
+ r"""
+ Utility function to get the tuple padding value given the kernel_size and padding.
+
+ Args:
+ padding (Union[`str`, `int`], *optional*):
+            Padding value. Can be either `"same"` or `"valid"`. If a different value is provided, the default padding from
+ PyTorch is used.
+ kernel_size (`int`, *optional*, defaults to 7):
+ Kernel size of the convolution layers.
+ stride (`int`, *optional*, defaults to 1):
+ Stride value of the convolution layers.
+ dilation (`int`, *optional*, defaults to 1):
+ Dilation value of the convolution layers.
+ """
+ dynamic = False
+ if padding is None:
+ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
+ return padding, dynamic
+
+ if isinstance(padding, str):
+ # for any string padding, the padding will be calculated for you, one of three ways
+ padding = padding.lower()
+ if padding == "same":
+ # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
+ if stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0:
+ # static case, no extra overhead
+ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
+ else:
+ # dynamic 'SAME' padding, has runtime/GPU memory overhead
+ padding = 0
+ dynamic = True
+ elif padding == "valid":
+ # 'VALID' padding, same as padding=0
+ padding = 0
+ else:
+ # Default to PyTorch style 'same'-ish symmetric padding
+ padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
+ return padding, dynamic
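+
+# Note: for a 3x3 convolution, padding="same" with stride 1 resolves statically to
+# padding=1 (dynamic=False); with stride 2 it falls back to padding=0 and dynamic=True,
+# so DynamicPad2d applies the TF-style asymmetric padding at runtime.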
+
+
+class WeightStandardizedConv2d(nn.Conv2d):
+ """Conv2d with Weight Standardization. Includes TensorFlow compatible SAME padding. Used for ViT Hybrid model.
+
+ Paper: [Micro-Batch Training with Batch-Channel Normalization and Weight
+ Standardization](https://arxiv.org/abs/1903.10520v2)
+ """
+
+ def __init__(
+ self,
+ in_channel,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding="SAME",
+ dilation=1,
+ groups=1,
+ bias=False,
+ eps=1e-6,
+ ):
+ padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
+ super().__init__(
+ in_channel,
+ out_channels,
+ kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups,
+ bias=bias,
+ )
+ if is_dynamic:
+ self.pad = DynamicPad2d(kernel_size, stride, dilation)
+ else:
+ self.pad = None
+ self.eps = eps
+
+ def forward(self, hidden_state):
+ if self.pad is not None:
+ hidden_state = self.pad(hidden_state)
+ weight = nn.functional.batch_norm(
+ self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps
+ ).reshape_as(self.weight)
+ hidden_state = nn.functional.conv2d(
+ hidden_state, weight, self.bias, self.stride, self.padding, self.dilation, self.groups
+ )
+ return hidden_state
+
+
+class BitGroupNormActivation(nn.GroupNorm):
+ r"""
+ A module that combines group normalization with an activation function.
+ """
+
+ def __init__(self, config, num_channels, eps=1e-5, affine=True, apply_activation=True):
+ super(BitGroupNormActivation, self).__init__(config.num_groups, num_channels, eps=eps, affine=affine)
+ if apply_activation:
+ self.activation = ACT2FN[config.hidden_act]
+ else:
+ self.activation = nn.Identity()
+
+ def forward(self, hidden_state):
+ hidden_state = nn.functional.group_norm(hidden_state, self.num_groups, self.weight, self.bias, self.eps)
+ hidden_state = self.activation(hidden_state)
+ return hidden_state
+
+
+class DynamicPad2d(nn.Module):
+ r"""
+ A module that wraps dynamic padding of any input, given the parameters of the convolutional layer and the input
+ hidden states.
+ """
+
+ def __init__(self, kernel_size, stride, dilation, value=0):
+ super().__init__()
+ # Safety checkers
+ if isinstance(kernel_size, int):
+ kernel_size = (kernel_size, kernel_size)
+
+ if isinstance(stride, int):
+ stride = (stride, stride)
+
+ if isinstance(dilation, int):
+ dilation = (dilation, dilation)
+
+ self.kernel_size = kernel_size
+ self.stride = stride
+ self.dilation = dilation
+ self.value = value
+
+ def compute_padding(x, kernel_size, stride, dilation):
+ return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)
+
+ self.compute_padding = compute_padding
+
+ def __call__(self, input):
+ # Get width and height
+ input_height, input_width = input.size()[-2:]
+
+ # Compute the padding values
+ padding_height = self.compute_padding(input_height, self.kernel_size[0], self.stride[0], self.dilation[0])
+ padding_width = self.compute_padding(input_width, self.kernel_size[1], self.stride[1], self.dilation[1])
+
+ # apply pad
+ if padding_height > 0 or padding_width > 0:
+ input = nn.functional.pad(
+ input,
+ [
+ padding_width // 2,
+ padding_width - padding_width // 2,
+ padding_height // 2,
+ padding_height - padding_height // 2,
+ ],
+ value=self.value,
+ )
+ return input
+
+
+class BitMaxPool2d(nn.MaxPool2d):
+    """TensorFlow-like 'SAME' wrapper for 2D max pooling"""
+
+ def __init__(
+ self,
+ kernel_size: int,
+ stride=None,
+ dilation=1,
+ ceil_mode=False,
+ padding=(0, 0),
+ padding_value=0,
+ use_dynamic_padding=True,
+ ):
+ kernel_size = kernel_size if isinstance(kernel_size, collections.abc.Iterable) else (kernel_size, kernel_size)
+ stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
+ dilation = dilation if isinstance(dilation, collections.abc.Iterable) else (dilation, dilation)
+ super().__init__(kernel_size, stride, padding, dilation, ceil_mode)
+ if use_dynamic_padding:
+ self.pad = DynamicPad2d(kernel_size, stride, dilation, padding_value)
+ else:
+ self.pad = nn.Identity()
+
+ def forward(self, hidden_states):
+ hidden_states = self.pad(hidden_states)
+ return nn.functional.max_pool2d(
+ hidden_states, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode
+ )
+
+
+class BitEmbeddings(nn.Module):
+ """
+ BiT Embeddings (stem) composed of a single aggressive convolution.
+ """
+
+ def __init__(self, config: BitConfig):
+ super().__init__()
+
+ self.convolution = WeightStandardizedConv2d(
+ config.num_channels,
+ config.embedding_size,
+ kernel_size=7,
+ stride=2,
+ eps=1e-8,
+ padding=config.global_padding,
+ )
+
+ self.pooler = BitMaxPool2d(kernel_size=3, stride=2, use_dynamic_padding=config.embedding_dynamic_padding)
+
+ # Use the same padding strategy as convolutional layers
+ if config.global_padding is not None and config.global_padding.upper() == "SAME":
+ self.pad = nn.Identity()
+ else:
+ self.pad = nn.ConstantPad2d(padding=(1, 1, 1, 1), value=0.0)
+
+ if not config.layer_type == "preactivation":
+ self.norm = BitGroupNormActivation(config, num_channels=config.embedding_size)
+ else:
+ self.norm = nn.Identity()
+
+ self.num_channels = config.num_channels
+
+ def forward(self, pixel_values: Tensor) -> Tensor:
+ num_channels = pixel_values.shape[1]
+ if num_channels != self.num_channels:
+ raise ValueError(
+                "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
+ )
+
+ embedding = self.convolution(pixel_values)
+
+ embedding = self.pad(embedding)
+
+ embedding = self.norm(embedding)
+
+ embedding = self.pooler(embedding)
+
+ return embedding
+
+
+# Copied from transformers.models.convnext.modeling_convnext.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
+
+# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Bit
+class BitDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+def make_div(value, divisor=8):
+ min_value = divisor
+ new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
+ if new_value < 0.9 * value:
+ new_value += divisor
+ return new_value
+
+
+class BitPreActivationBottleneckLayer(nn.Module):
+ """Pre-activation (v2) bottleneck block.
+ Follows the implementation of "Identity Mappings in Deep Residual Networks":
+ https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
+
+ Except it puts the stride on 3x3 conv when available.
+ """
+
+ def __init__(
+ self,
+ config,
+ in_channels,
+ out_channels=None,
+ bottle_ratio=0.25,
+ stride=1,
+ dilation=1,
+ first_dilation=None,
+ groups=1,
+ drop_path_rate=0.0,
+ is_first_layer=False,
+ ):
+ super().__init__()
+
+ first_dilation = first_dilation or dilation
+
+ out_channels = out_channels or in_channels
+ mid_channels = make_div(out_channels * bottle_ratio)
+
+ if is_first_layer:
+ self.downsample = BitDownsampleConv(
+ config,
+ in_channels,
+ out_channels,
+ stride=stride,
+ preact=True,
+ )
+ else:
+ self.downsample = None
+
+ self.norm1 = BitGroupNormActivation(config, in_channels)
+ self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-8, padding=config.global_padding)
+
+ self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels)
+ self.conv2 = WeightStandardizedConv2d(
+ mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-8, padding=config.global_padding
+ )
+
+ self.norm3 = BitGroupNormActivation(config, mid_channels)
+ self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-8, padding=config.global_padding)
+
+ self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
+
+ def forward(self, hidden_states):
+ hidden_states_preact = self.norm1(hidden_states)
+
+ # shortcut branch
+ shortcut = hidden_states
+ if self.downsample is not None:
+ shortcut = self.downsample(hidden_states_preact)
+
+ # residual branch
+ hidden_states = self.conv1(hidden_states_preact)
+ hidden_states = self.conv2(self.norm2(hidden_states))
+ hidden_states = self.conv3(self.norm3(hidden_states))
+ hidden_states = self.drop_path(hidden_states)
+ return hidden_states + shortcut
+
+
+class BitBottleneckLayer(nn.Module):
+ """Non Pre-activation bottleneck block, equivalent to V1.5/V1b bottleneck. Used for ViT Hybrid."""
+
+ def __init__(
+ self,
+ config,
+ in_channels,
+ out_channels=None,
+ bottle_ratio=0.25,
+ stride=1,
+ dilation=1,
+ first_dilation=None,
+ groups=1,
+ drop_path_rate=0.0,
+ is_first_layer=False,
+ ):
+ super().__init__()
+ first_dilation = first_dilation or dilation
+
+ out_channels = out_channels or in_channels
+ mid_chs = make_div(out_channels * bottle_ratio)
+
+ if is_first_layer:
+ self.downsample = BitDownsampleConv(
+ config,
+ in_channels,
+ out_channels,
+ stride=stride,
+ preact=False,
+ )
+ else:
+ self.downsample = None
+
+ self.conv1 = WeightStandardizedConv2d(in_channels, mid_chs, 1, eps=1e-8, padding=config.global_padding)
+ self.norm1 = BitGroupNormActivation(config, num_channels=mid_chs)
+ self.conv2 = WeightStandardizedConv2d(
+ mid_chs,
+ mid_chs,
+ 3,
+ stride=stride,
+ dilation=first_dilation,
+ groups=groups,
+ eps=1e-8,
+ padding=config.global_padding,
+ )
+ self.norm2 = BitGroupNormActivation(config, num_channels=mid_chs)
+ self.conv3 = WeightStandardizedConv2d(mid_chs, out_channels, 1, eps=1e-8, padding=config.global_padding)
+ self.norm3 = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
+ self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
+
+ self.activation = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_states):
+ # shortcut branch
+ shortcut = hidden_states
+ if self.downsample is not None:
+ shortcut = self.downsample(hidden_states)
+
+ # residual
+ hidden_states = self.conv1(hidden_states)
+ hidden_states = self.norm1(hidden_states)
+
+ hidden_states = self.conv2(hidden_states)
+ hidden_states = self.norm2(hidden_states)
+
+ hidden_states = self.conv3(hidden_states)
+ hidden_states = self.norm3(hidden_states)
+
+ hidden_states = self.drop_path(hidden_states)
+ hidden_states = self.activation(hidden_states + shortcut)
+ return hidden_states
+
+
+class BitDownsampleConv(nn.Module):
+ def __init__(
+ self,
+ config,
+ in_channels,
+ out_channels,
+ stride=1,
+ preact=True,
+ ):
+ super().__init__()
+ self.conv = WeightStandardizedConv2d(
+ in_channels, out_channels, 1, stride=stride, eps=1e-8, padding=config.global_padding
+ )
+ self.norm = (
+ nn.Identity()
+ if preact
+ else BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)
+ )
+
+ def forward(self, x):
+ return self.norm(self.conv(x))
+
+
+class BitStage(nn.Module):
+ """
+    A ResNet v2 stage composed of stacked layers.
+ """
+
+ def __init__(
+ self,
+ config,
+ in_channels,
+ out_channels,
+ stride,
+ dilation,
+ depth,
+ bottle_ratio=0.25,
+ layer_dropout=None,
+ ):
+ super().__init__()
+
+ first_dilation = 1 if dilation in (1, 2) else 2
+
+ # Get the layer type
+ if config.layer_type == "bottleneck":
+ layer_cls = BitBottleneckLayer
+ else:
+ layer_cls = BitPreActivationBottleneckLayer
+
+ prev_chs = in_channels
+ self.layers = nn.Sequential()
+ for layer_idx in range(depth):
+ # Get the current hyper-parameters
+ stride, drop_path_rate, is_first_layer = self._get_updated_hyperparameters(
+ layer_idx, stride, layer_dropout
+ )
+
+ self.layers.add_module(
+ str(layer_idx),
+ layer_cls(
+ config,
+ prev_chs,
+ out_channels,
+ stride=stride,
+ dilation=dilation,
+ bottle_ratio=bottle_ratio,
+ first_dilation=first_dilation,
+ drop_path_rate=drop_path_rate,
+ is_first_layer=is_first_layer,
+ ),
+ )
+ prev_chs = out_channels
+ first_dilation = dilation
+
+ def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout):
+ r"""
+ Get the new hyper-parameters with respect to the previous ones and the index of the current layer.
+ """
+ if layer_dropout:
+ drop_path_rate = layer_dropout[layer_idx]
+ else:
+ drop_path_rate = 0.0
+
+ if layer_idx != 0:
+ stride = 1
+
+ is_first_layer = layer_idx == 0
+
+ return stride, drop_path_rate, is_first_layer
+
+ def forward(self, input: Tensor) -> Tensor:
+ hidden_state = input
+ for _, layer in enumerate(self.layers):
+ hidden_state = layer(hidden_state)
+ return hidden_state
+
+
+class BitEncoder(nn.Module):
+ def __init__(self, config: BitConfig):
+ super().__init__()
+ self.stages = nn.ModuleList([])
+
+ prev_chs = config.embedding_size
+
+        # These need to stay hardcoded
+ current_stride = 4
+ dilation = 1
+
+ layer_dropouts = [
+ x.tolist()
+ for x in torch.Tensor(np.linspace(0, config.drop_path_rate, sum(config.depths))).split(config.depths)
+ ]
+
+ for stage_idx, (current_depth, current_hidden_size, layer_dropout) in enumerate(
+ zip(config.depths, config.hidden_sizes, layer_dropouts)
+ ):
+ # Get the updated hyper params
+ out_channels, stride, dilation = self._get_updated_hyperparameters(
+ stage_idx, current_stride, current_hidden_size, dilation, config
+ )
+
+ stage = BitStage(
+ config,
+ prev_chs,
+ out_channels,
+ stride=stride,
+ dilation=dilation,
+ depth=current_depth,
+ layer_dropout=layer_dropout,
+ )
+
+ prev_chs = out_channels
+ current_stride *= stride
+
+ self.stages.add_module(str(stage_idx), stage)
+
+ def _get_updated_hyperparameters(self, stage_idx, current_stride, current_hidden_size, dilation, config):
+ out_channels = make_div(current_hidden_size * config.width_factor)
+ stride = 1 if stage_idx == 0 else 2
+ if current_stride >= config.output_stride:
+ dilation *= stride
+ stride = 1
+ return out_channels, stride, dilation
+
+ def forward(
+ self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
+ ) -> BaseModelOutputWithNoAttention:
+ hidden_states = () if output_hidden_states else None
+
+ for stage_module in self.stages:
+ if output_hidden_states:
+ hidden_states = hidden_states + (hidden_state,)
+
+ hidden_state = stage_module(hidden_state)
+
+ if output_hidden_states:
+ hidden_states = hidden_states + (hidden_state,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_state, hidden_states] if v is not None)
+
+ return BaseModelOutputWithNoAttention(
+ last_hidden_state=hidden_state,
+ hidden_states=hidden_states,
+ )
+
+
+class BitPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BitConfig
+ base_model_prefix = "bit"
+ main_input_name = "pixel_values"
+
+ def _init_weights(self, module):
+ if isinstance(module, nn.Conv2d):
+ nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
+ elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
+ nn.init.constant_(module.weight, 1)
+ nn.init.constant_(module.bias, 0)
+
+
+BIT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`BitConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`BitImageProcessor.__call__`]
+ for details.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
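+ # A minimal usage sketch for the model below (kept as a comment so it does not run at import time;
+ # the "google/bit-50" checkpoint name is an assumption, substitute any BiT checkpoint you have access to):
+ #
+ #     from transformers import AutoImageProcessor, BitModel
+ #     import torch
+ #
+ #     processor = AutoImageProcessor.from_pretrained("google/bit-50")
+ #     model = BitModel.from_pretrained("google/bit-50")
+ #     inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL.Image
+ #     with torch.no_grad():
+ #         outputs = model(**inputs)
+ #     # last_hidden_state: (batch_size, hidden_sizes[-1], height // output_stride, width // output_stride)
+ #     print(outputs.last_hidden_state.shape)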
+
+@add_start_docstrings(
+ "The bare BiT model outputting raw features without any specific head on top.",
+ BIT_START_DOCSTRING,
+)
+class BitModel(BitPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embedder = BitEmbeddings(config)
+
+ self.encoder = BitEncoder(config)
+ self.norm = (
+ BitGroupNormActivation(config, num_channels=config.hidden_sizes[-1])
+ if config.layer_type == "preactivation"
+ else nn.Identity()
+ )
+
+ self.pooler = nn.AdaptiveAvgPool2d((1, 1))
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
+ ) -> BaseModelOutputWithPoolingAndNoAttention:
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ embedding_output = self.embedder(pixel_values)
+
+ encoder_outputs = self.encoder(
+ embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ last_hidden_state = self.norm(last_hidden_state)
+
+ pooled_output = self.pooler(last_hidden_state)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndNoAttention(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ )
+
+
+@add_start_docstrings(
+ """
+ BiT Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+ ImageNet.
+ """,
+ BIT_START_DOCSTRING,
+)
+class BitForImageClassification(BitPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.bit = BitModel(config)
+ # classification head
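+ # The pooler output has shape (batch_size, hidden_sizes[-1], 1, 1); Flatten turns it into
+ # (batch_size, hidden_sizes[-1]) before the linear layer.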
+ self.classifier = nn.Sequential(
+ nn.Flatten(),
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
+ )
+ # initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutputWithNoAttention,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> ImageClassifierOutputWithNoAttention:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
+
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(pooled_output)
+
+ loss = None
+
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return (loss,) + output if loss is not None else output
+
+ return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
+
+
+@add_start_docstrings(
+ """
+ BiT backbone, to be used with frameworks like DETR and MaskFormer.
+ """,
+ BIT_START_DOCSTRING,
+)
+class BitBackbone(BitPreTrainedModel, BackboneMixin):
+ def __init__(self, config):
+ super().__init__(config)
+ super()._init_backbone(config)
+
+ self.bit = BitModel(config)
+ self.num_features = [config.embedding_size] + config.hidden_sizes
+
+ # initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("google/bit-50")
+ >>> model = AutoBackbone.from_pretrained("google/bit-50")
+
+ >>> inputs = processor(image, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ outputs = self.bit(pixel_values, output_hidden_states=True, return_dict=True)
+
+ hidden_states = outputs.hidden_states
+
+ feature_maps = ()
+ for idx, stage in enumerate(self.stage_names):
+ if stage in self.out_features:
+ feature_maps += (hidden_states[idx],)
+
+ if not return_dict:
+ output = (feature_maps,)
+ if output_hidden_states:
+ output += (outputs.hidden_states,)
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=None,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dinov2/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..01d02a9e65fda02e543b116dc4bf7ccba6097c6e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__init__.py
@@ -0,0 +1,61 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_dinov2": ["DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Dinov2Config", "Dinov2OnnxConfig"]
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_dinov2"] = [
+ "DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "Dinov2ForImageClassification",
+ "Dinov2Model",
+ "Dinov2PreTrainedModel",
+ "Dinov2Backbone",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_dinov2 import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Dinov2Config, Dinov2OnnxConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_dinov2 import (
+ DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST,
+ Dinov2Backbone,
+ Dinov2ForImageClassification,
+ Dinov2Model,
+ Dinov2PreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cde5c90a53b5f2b99a0f4fed810e28f068ea81af
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..633fa0568bab0731ff37a22cab6c72f31f854fea
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/configuration_dinov2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce86daf9818e6b206d136ec80e81984a0495d798
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/convert_dinov2_to_hf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b45e5fa59d11d6e0b6bc4e8b69d57c2990d2e70d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/dinov2/__pycache__/modeling_dinov2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py b/venv/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5fe872a706fc71808562e8152db4eee4ca7218f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/dinov2/configuration_dinov2.py
@@ -0,0 +1,175 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" DINOv2 model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class Dinov2Config(BackboneConfigMixin, PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Dinov2Model`]. It is used to instantiate a
+ Dinov2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Dinov2
+ [facebook/dinov2-base](https://huggingface.co/facebook/dinov2-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ mlp_ratio (`int`, *optional*, defaults to 4):
+ Ratio of the hidden size of the MLPs relative to the `hidden_size`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ layerscale_value (`float`, *optional*, defaults to 1.0):
+ Initial value to use for layer scale.
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
+ Stochastic depth rate per sample (when applied in the main path of residual layers).
+ use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
+ Whether to use the SwiGLU feedforward neural network.
+ out_features (`List[str]`, *optional*):
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ out_indices (`List[int]`, *optional*):
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ apply_layernorm (`bool`, *optional*, defaults to `True`):
+ Whether to apply layer normalization to the feature maps in case the model is used as backbone.
+ reshape_hidden_states (`bool`, *optional*, defaults to `True`):
+ Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
+ case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
+ seq_len, hidden_size)`.
+
+ Example:
+
+ ```python
+ >>> from transformers import Dinov2Config, Dinov2Model
+
+ >>> # Initializing a Dinov2 base-sized (facebook/dinov2-base) style configuration
+ >>> configuration = Dinov2Config()
+
+ >>> # Initializing a model (with random weights) from the base-sized style configuration
+ >>> model = Dinov2Model(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "dinov2"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ mlp_ratio=4,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-6,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ qkv_bias=True,
+ layerscale_value=1.0,
+ drop_path_rate=0.0,
+ use_swiglu_ffn=False,
+ out_features=None,
+ out_indices=None,
+ apply_layernorm=True,
+ reshape_hidden_states=True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.mlp_ratio = mlp_ratio
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.qkv_bias = qkv_bias
+ self.layerscale_value = layerscale_value
+ self.drop_path_rate = drop_path_rate
+ self.use_swiglu_ffn = use_swiglu_ffn
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+ )
+ self.apply_layernorm = apply_layernorm
+ self.reshape_hidden_states = reshape_hidden_states
+
+
+class Dinov2OnnxConfig(OnnxConfig):
+ torch_onnx_minimum_version = version.parse("1.11")
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dinov2/convert_dinov2_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/dinov2/convert_dinov2_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd5871e6c440661a7050bab7696db39e865b714d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/dinov2/convert_dinov2_to_hf.py
@@ -0,0 +1,287 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert DINOv2 checkpoints from the original repository.
+
+URL: https://github.com/facebookresearch/dinov2/tree/main
+"""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+import torch.nn as nn
+from huggingface_hub import hf_hub_download
+from PIL import Image
+from torchvision import transforms
+
+from transformers import BitImageProcessor, Dinov2Config, Dinov2ForImageClassification, Dinov2Model
+from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_dinov2_config(model_name, image_classifier=False):
+ config = Dinov2Config(image_size=518, patch_size=14)
+
+ # size of the architecture
+ if "vits" in model_name:
+ config.hidden_size = 384
+ config.num_attention_heads = 6
+ elif "vitb" in model_name:
+ pass
+ elif "vitl" in model_name:
+ config.hidden_size = 1024
+ config.num_hidden_layers = 24
+ config.num_attention_heads = 16
+ elif "vitg" in model_name:
+ config.use_swiglu_ffn = True
+ config.hidden_size = 1536
+ config.num_hidden_layers = 40
+ config.num_attention_heads = 24
+ else:
+ raise ValueError("Model not supported")
+
+ if image_classifier:
+ repo_id = "huggingface/label-files"
+ filename = "imagenet-1k-id2label.json"
+ config.num_labels = 1000
+ config.id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ config.id2label = {int(k): v for k, v in config.id2label.items()}
+
+ return config
+
+
+def create_rename_keys(config):
+ rename_keys = []
+ # fmt: off
+
+ # patch embedding layer
+ rename_keys.append(("cls_token", "embeddings.cls_token"))
+ rename_keys.append(("mask_token", "embeddings.mask_token"))
+ rename_keys.append(("pos_embed", "embeddings.position_embeddings"))
+ rename_keys.append(("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight"))
+ rename_keys.append(("patch_embed.proj.bias", "embeddings.patch_embeddings.projection.bias"))
+
+ for i in range(config.num_hidden_layers):
+ # layernorms
+ rename_keys.append((f"blocks.{i}.norm1.weight", f"encoder.layer.{i}.norm1.weight"))
+ rename_keys.append((f"blocks.{i}.norm1.bias", f"encoder.layer.{i}.norm1.bias"))
+ rename_keys.append((f"blocks.{i}.norm2.weight", f"encoder.layer.{i}.norm2.weight"))
+ rename_keys.append((f"blocks.{i}.norm2.bias", f"encoder.layer.{i}.norm2.bias"))
+ # MLP
+ if config.use_swiglu_ffn:
+ rename_keys.append((f"blocks.{i}.mlp.w12.weight", f"encoder.layer.{i}.mlp.w12.weight"))
+ rename_keys.append((f"blocks.{i}.mlp.w12.bias", f"encoder.layer.{i}.mlp.w12.bias"))
+ rename_keys.append((f"blocks.{i}.mlp.w3.weight", f"encoder.layer.{i}.mlp.w3.weight"))
+ rename_keys.append((f"blocks.{i}.mlp.w3.bias", f"encoder.layer.{i}.mlp.w3.bias"))
+ else:
+ rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"encoder.layer.{i}.mlp.fc1.weight"))
+ rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"encoder.layer.{i}.mlp.fc1.bias"))
+ rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"encoder.layer.{i}.mlp.fc2.weight"))
+ rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"encoder.layer.{i}.mlp.fc2.bias"))
+ # layerscale
+ rename_keys.append((f"blocks.{i}.ls1.gamma", f"encoder.layer.{i}.layer_scale1.lambda1"))
+ rename_keys.append((f"blocks.{i}.ls2.gamma", f"encoder.layer.{i}.layer_scale2.lambda1"))
+ # attention projection layer
+ rename_keys.append((f"blocks.{i}.attn.proj.weight", f"encoder.layer.{i}.attention.output.dense.weight"))
+ rename_keys.append((f"blocks.{i}.attn.proj.bias", f"encoder.layer.{i}.attention.output.dense.bias"))
+
+ # final layernorm
+ rename_keys.append(("norm.weight", "layernorm.weight"))
+ rename_keys.append(("norm.bias", "layernorm.bias"))
+
+ # fmt: on
+ return rename_keys
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config):
+ for i in range(config.num_hidden_layers):
+ # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
+ in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
+ in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
+ # next, add query, keys and values (in that order) to the state dict
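+ # e.g. for hidden_size = 768 the fused qkv weight has shape (2304, 768): rows [0:768] become the
+ # query projection, rows [768:1536] the key projection and rows [1536:2304] the value projection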
+ state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
+ state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
+ state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+ config.hidden_size : config.hidden_size * 2, :
+ ]
+ state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
+ config.hidden_size : config.hidden_size * 2
+ ]
+ state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
+ state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image = Image.open(requests.get(url, stream=True).raw)
+ return image
+
+
+@torch.no_grad()
+def convert_dinov2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
+ """
+ Copy/paste/tweak model's weights to our DINOv2 structure.
+ """
+
+ # define default Dinov2 configuration
+ image_classifier = "1layer" in model_name
+ config = get_dinov2_config(model_name, image_classifier=image_classifier)
+
+ # load original model from torch hub
+ original_model = torch.hub.load("facebookresearch/dinov2", model_name.replace("_1layer", ""))
+ original_model.eval()
+
+ # load state_dict of original model, remove and rename some keys
+ state_dict = original_model.state_dict()
+ rename_keys = create_rename_keys(config)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_q_k_v(state_dict, config)
+
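+ # SwiGLU checkpoints store the feed-forward weights as "w12" (the fused gate/up projection) and "w3"
+ # (the down projection); the HF implementation names them "weights_in" and "weights_out"
+ # (see Dinov2SwiGLUFFN in modeling_dinov2.py), so rename them here.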
+ for key, val in state_dict.copy().items():
+ val = state_dict.pop(key)
+ if "w12" in key:
+ key = key.replace("w12", "weights_in")
+ if "w3" in key:
+ key = key.replace("w3", "weights_out")
+ state_dict[key] = val
+
+ # load HuggingFace model
+ if image_classifier:
+ model = Dinov2ForImageClassification(config).eval()
+ model.dinov2.load_state_dict(state_dict)
+ model_name_to_classifier_dict_url = {
+ "dinov2_vits14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear_head.pth",
+ "dinov2_vitb14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear_head.pth",
+ "dinov2_vitl14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear_head.pth",
+ "dinov2_vitg14_1layer": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear_head.pth",
+ }
+ url = model_name_to_classifier_dict_url[model_name]
+ classifier_state_dict = torch.hub.load_state_dict_from_url(url, map_location="cpu")
+ model.classifier.weight = nn.Parameter(classifier_state_dict["weight"])
+ model.classifier.bias = nn.Parameter(classifier_state_dict["bias"])
+ else:
+ model = Dinov2Model(config).eval()
+ model.load_state_dict(state_dict)
+
+ # load image
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+
+ # preprocess image
+ transformations = transforms.Compose(
+ [
+ transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
+ transforms.CenterCrop(224),
+ transforms.ToTensor(),
+ transforms.Normalize(
+ mean=IMAGENET_DEFAULT_MEAN, # these are RGB mean+std values
+ std=IMAGENET_DEFAULT_STD, # across a large photo dataset.
+ ),
+ ]
+ )
+
+ original_pixel_values = transformations(image).unsqueeze(0) # insert batch dimension
+
+ processor = BitImageProcessor(
+ size={"shortest_edge": 256},
+ resample=PILImageResampling.BICUBIC,
+ image_mean=IMAGENET_DEFAULT_MEAN,
+ image_std=IMAGENET_DEFAULT_STD,
+ )
+ pixel_values = processor(image, return_tensors="pt").pixel_values
+
+ assert torch.allclose(original_pixel_values, pixel_values)
+
+ with torch.no_grad():
+ outputs = model(pixel_values, output_hidden_states=True)
+ original_outputs = original_model(pixel_values)
+
+ # assert values
+ if image_classifier:
+ print("Predicted class:")
+ class_idx = outputs.logits.argmax(-1).item()
+ print(model.config.id2label[class_idx])
+ else:
+ assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape
+ assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=1e-3)
+ print("Looks ok!")
+
+ if pytorch_dump_folder_path is not None:
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ model_name_to_hf_name = {
+ "dinov2_vits14": "dinov2-small",
+ "dinov2_vitb14": "dinov2-base",
+ "dinov2_vitl14": "dinov2-large",
+ "dinov2_vitg14": "dinov2-giant",
+ "dinov2_vits14_1layer": "dinov2-small-imagenet1k-1-layer",
+ "dinov2_vitb14_1layer": "dinov2-base-imagenet1k-1-layer",
+ "dinov2_vitl14_1layer": "dinov2-large-imagenet1k-1-layer",
+ "dinov2_vitg14_1layer": "dinov2-giant-imagenet1k-1-layer",
+ }
+
+ name = model_name_to_hf_name[model_name]
+ model.push_to_hub(f"facebook/{name}")
+ processor.push_to_hub(f"facebook/{name}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--model_name",
+ default="dinov2_vitb14",
+ type=str,
+ choices=[
+ "dinov2_vits14",
+ "dinov2_vitb14",
+ "dinov2_vitl14",
+ "dinov2_vitg14",
+ "dinov2_vits14_1layer",
+ "dinov2_vitb14_1layer",
+ "dinov2_vitl14_1layer",
+ "dinov2_vitg14_1layer",
+ ],
+ help="Name of the model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+ convert_dinov2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
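+ # Example invocation (the dump folder path is hypothetical):
+ #   python convert_dinov2_to_hf.py --model_name dinov2_vitb14 --pytorch_dump_folder_path ./dinov2-base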
diff --git a/venv/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py b/venv/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py
new file mode 100644
index 0000000000000000000000000000000000000000..c25022f6ec22d8d4afa1f926af9eb6e4d03adb35
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/dinov2/modeling_dinov2.py
@@ -0,0 +1,856 @@
+# coding=utf-8
+# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch DINOv2 model."""
+
+
+import collections.abc
+import math
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BackboneOutput,
+ BaseModelOutput,
+ BaseModelOutputWithPooling,
+ ImageClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ...utils.backbone_utils import BackboneMixin
+from .configuration_dinov2 import Dinov2Config
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "Dinov2Config"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/dinov2-base"
+_EXPECTED_OUTPUT_SHAPE = [1, 257, 768]
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "facebook/dinov2-small-imagenet1k-1-layer"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+
+from ..deprecated._archive_maps import DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class Dinov2Embeddings(nn.Module):
+ """
+ Construct the CLS token, mask token, position and patch embeddings.
+ """
+
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+
+ self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
+ self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
+ self.patch_embeddings = Dinov2PatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+ This method interpolates the pre-trained position encodings so that the model can be used on
+ higher-resolution images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
+ num_patches = embeddings.shape[1] - 1
+ num_positions = self.position_embeddings.shape[1] - 1
+ if num_patches == num_positions and height == width:
+ return self.position_embeddings
+ class_pos_embed = self.position_embeddings[:, 0]
+ patch_pos_embed = self.position_embeddings[:, 1:]
+ dim = embeddings.shape[-1]
+ height = height // self.config.patch_size
+ width = width // self.config.patch_size
+ # we add a small number to avoid floating point error in the interpolation
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
+ height, width = height + 0.1, width + 0.1
+ patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+ target_dtype = patch_pos_embed.dtype
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed.to(dtype=torch.float32),
+ scale_factor=(float(height / math.sqrt(num_positions)), float(width / math.sqrt(num_positions))),
+ mode="bicubic",
+ align_corners=False,
+ ).to(dtype=target_dtype)
+ if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
+ raise ValueError("Width or height does not match with the interpolated position embeddings")
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
+
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
+ batch_size, _, height, width = pixel_values.shape
+ target_dtype = self.patch_embeddings.projection.weight.dtype
+ embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
+
+ if bool_masked_pos is not None:
+ embeddings = torch.where(
+ bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
+ )
+
+ # add the [CLS] token to the embedded patch tokens
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+ # add positional encoding to each token
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+class Dinov2PatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
+ num_channels = pixel_values.shape[1]
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ f" Expected {self.num_channels} but got {num_channels}."
+ )
+ embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
+ return embeddings
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Dinov2
+class Dinov2SelfAttention(nn.Module):
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Dinov2
+class Dinov2SelfOutput(nn.Module):
+ """
+ The residual connection is defined in Dinov2Layer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Dinov2
+class Dinov2Attention(nn.Module):
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+ self.attention = Dinov2SelfAttention(config)
+ self.output = Dinov2SelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads: Set[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class Dinov2LayerScale(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ return hidden_state * self.lambda1
+
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
+
+# Copied from transformers.models.beit.modeling_beit.BeitDropPath
+class Dinov2DropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class Dinov2MLP(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ in_features = out_features = config.hidden_size
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
+ if isinstance(config.hidden_act, str):
+ self.activation = ACT2FN[config.hidden_act]
+ else:
+ self.activation = config.hidden_act
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.fc1(hidden_state)
+ hidden_state = self.activation(hidden_state)
+ hidden_state = self.fc2(hidden_state)
+ return hidden_state
+
+
+class Dinov2SwiGLUFFN(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ in_features = out_features = config.hidden_size
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
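+ # SwiGLU uses two input projections, so the hidden width is scaled by 2/3 (keeping the parameter
+ # count comparable to a standard MLP) and rounded up to the next multiple of 8.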
+ hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
+
+ self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
+ self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.weights_in(hidden_state)
+ x1, x2 = hidden_state.chunk(2, dim=-1)
+ hidden = nn.functional.silu(x1) * x2
+ return self.weights_out(hidden)
+
+
+class Dinov2Layer(nn.Module):
+ """This corresponds to the Block class in the original implementation."""
+
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.attention = Dinov2Attention(config)
+ self.layer_scale1 = Dinov2LayerScale(config)
+ self.drop_path = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
+
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ if config.use_swiglu_ffn:
+ self.mlp = Dinov2SwiGLUFFN(config)
+ else:
+ self.mlp = Dinov2MLP(config)
+ self.layer_scale2 = Dinov2LayerScale(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_attention_outputs = self.attention(
+ self.norm1(hidden_states), # in Dinov2, layernorm is applied before self-attention
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+
+ attention_output = self.layer_scale1(attention_output)
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection
+ hidden_states = self.drop_path(attention_output) + hidden_states
+
+ # in Dinov2, layernorm is also applied after self-attention
+ layer_output = self.norm2(hidden_states)
+ layer_output = self.mlp(layer_output)
+ layer_output = self.layer_scale2(layer_output)
+
+ # second residual connection
+ layer_output = self.drop_path(layer_output) + hidden_states
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->Dinov2
+class Dinov2Encoder(nn.Module):
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([Dinov2Layer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class Dinov2PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = Dinov2Config
+ base_model_prefix = "dinov2"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
+ # `trunc_normal_cpu` not implemented in `half` issues
+ module.weight.data = nn.init.trunc_normal_(
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
+ ).to(module.weight.dtype)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, Dinov2Embeddings):
+ module.position_embeddings.data = nn.init.trunc_normal_(
+ module.position_embeddings.data.to(torch.float32),
+ mean=0.0,
+ std=self.config.initializer_range,
+ ).to(module.position_embeddings.dtype)
+
+ module.cls_token.data = nn.init.trunc_normal_(
+ module.cls_token.data.to(torch.float32),
+ mean=0.0,
+ std=self.config.initializer_range,
+ ).to(module.cls_token.dtype)
+
+
+DINOV2_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`Dinov2Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DINOV2_BASE_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`BitImageProcessor.preprocess`] for details.
+
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
+ pre-training.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+DINOV2_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`BitImageProcessor.preprocess`] for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.",
+ DINOV2_START_DOCSTRING,
+)
+class Dinov2Model(Dinov2PreTrainedModel):
+ def __init__(self, config: Dinov2Config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = Dinov2Embeddings(config)
+ self.encoder = Dinov2Encoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(DINOV2_BASE_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ bool_masked_pos: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+ pooled_output = sequence_output[:, 0, :]
+
+ if not return_dict:
+ head_outputs = (sequence_output, pooled_output)
+ return head_outputs + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
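+# A minimal usage sketch (illustrative), reusing the "facebook/dinov2-base" checkpoint from the
+# backbone example below:
+#
+#     model = Dinov2Model.from_pretrained("facebook/dinov2-base")
+#     outputs = model(pixel_values)
+#     last_hidden_state = outputs.last_hidden_state  # (batch_size, 1 + num_patches, hidden_size)
+#     pooled_output = outputs.pooler_output          # final hidden state of the [CLS] token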
+
+@add_start_docstrings(
+ """
+    Dinov2 Model transformer with an image classification head on top (a linear layer on top of the concatenation of
+    the final hidden state of the [CLS] token and the mean of the patch tokens) e.g. for ImageNet.
+ """,
+ DINOV2_START_DOCSTRING,
+)
+class Dinov2ForImageClassification(Dinov2PreTrainedModel):
+ def __init__(self, config: Dinov2Config) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.dinov2 = Dinov2Model(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.dinov2(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0] # batch_size, sequence_length, hidden_size
+
+ cls_token = sequence_output[:, 0]
+ patch_tokens = sequence_output[:, 1:]
+
+ linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
+
+ logits = self.classifier(linear_input)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
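+# A minimal usage sketch (illustrative; the fine-tuned checkpoint name is an assumption):
+#
+#     model = Dinov2ForImageClassification.from_pretrained("facebook/dinov2-base-imagenet1k-1-layer")
+#     logits = model(pixel_values).logits  # (batch_size, num_labels)
+#     predicted_class_id = int(logits.argmax(-1)[0])
+#     label = model.config.id2label[predicted_class_id]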
+
+@add_start_docstrings(
+ """
+ Dinov2 backbone, to be used with frameworks like DETR and MaskFormer.
+ """,
+ DINOV2_START_DOCSTRING,
+)
+class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin):
+ def __init__(self, config):
+ super().__init__(config)
+ super()._init_backbone(config)
+
+ self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
+ self.embeddings = Dinov2Embeddings(config)
+ self.encoder = Dinov2Encoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
+ >>> model = AutoBackbone.from_pretrained(
+ ... "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"]
+ ... )
+
+ >>> inputs = processor(image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> feature_maps = outputs.feature_maps
+ >>> list(feature_maps[-1].shape)
+ [1, 768, 16, 16]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ embedding_output = self.embeddings(pixel_values)
+
+ outputs = self.encoder(
+ embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
+ )
+
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ feature_maps = ()
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
+ if stage in self.out_features:
+ if self.config.apply_layernorm:
+ hidden_state = self.layernorm(hidden_state)
+ if self.config.reshape_hidden_states:
+ hidden_state = hidden_state[:, 1:]
+ # this was actually a bug in the original implementation that we copied here,
+                    # because normally the order is height, width
+ batch_size, _, height, width = pixel_values.shape
+ patch_size = self.config.patch_size
+ hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
+ feature_maps += (hidden_state,)
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (feature_maps,) + outputs[1:]
+ else:
+ output = (feature_maps,) + outputs[2:]
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=outputs.attentions if output_attentions else None,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a2756eb9d1c269e08446f9328120738196349d0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py
@@ -0,0 +1,166 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_distilbert": [
+ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "DistilBertConfig",
+ "DistilBertOnnxConfig",
+ ],
+ "tokenization_distilbert": ["DistilBertTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_distilbert"] = [
+ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "DistilBertForMaskedLM",
+ "DistilBertForMultipleChoice",
+ "DistilBertForQuestionAnswering",
+ "DistilBertForSequenceClassification",
+ "DistilBertForTokenClassification",
+ "DistilBertModel",
+ "DistilBertPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_distilbert"] = [
+ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFDistilBertForMaskedLM",
+ "TFDistilBertForMultipleChoice",
+ "TFDistilBertForQuestionAnswering",
+ "TFDistilBertForSequenceClassification",
+ "TFDistilBertForTokenClassification",
+ "TFDistilBertMainLayer",
+ "TFDistilBertModel",
+ "TFDistilBertPreTrainedModel",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_distilbert"] = [
+ "FlaxDistilBertForMaskedLM",
+ "FlaxDistilBertForMultipleChoice",
+ "FlaxDistilBertForQuestionAnswering",
+ "FlaxDistilBertForSequenceClassification",
+ "FlaxDistilBertForTokenClassification",
+ "FlaxDistilBertModel",
+ "FlaxDistilBertPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_distilbert import (
+ DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ DistilBertConfig,
+ DistilBertOnnxConfig,
+ )
+ from .tokenization_distilbert import DistilBertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_distilbert_fast import DistilBertTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_distilbert import (
+ DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ DistilBertForMaskedLM,
+ DistilBertForMultipleChoice,
+ DistilBertForQuestionAnswering,
+ DistilBertForSequenceClassification,
+ DistilBertForTokenClassification,
+ DistilBertModel,
+ DistilBertPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_distilbert import (
+ TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFDistilBertForMaskedLM,
+ TFDistilBertForMultipleChoice,
+ TFDistilBertForQuestionAnswering,
+ TFDistilBertForSequenceClassification,
+ TFDistilBertForTokenClassification,
+ TFDistilBertMainLayer,
+ TFDistilBertModel,
+ TFDistilBertPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_distilbert import (
+ FlaxDistilBertForMaskedLM,
+ FlaxDistilBertForMultipleChoice,
+ FlaxDistilBertForQuestionAnswering,
+ FlaxDistilBertForSequenceClassification,
+ FlaxDistilBertForTokenClassification,
+ FlaxDistilBertModel,
+ FlaxDistilBertPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c70705cfe79d1cf9110830ffadb0198bc799e351
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a7309a4ab237f2388c08a3a66431bc3bee36df8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/configuration_distilbert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..726fbf6aca2c442fcb77f028522909fd5dc7aa81
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_distilbert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d37a7a1338811a63a7d7fbb2fc9769abf256b1ac
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_flax_distilbert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85a8abd7641f098ebf57f9a18a4adcf73f8ff6a6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/modeling_tf_distilbert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2af40decdcfcdfb5afbd4d7a0e3560759529bcc6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be2f01bb3c6bfb9855756309fec3ce1180057851
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py b/venv/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f6b004dc0bbb978f7f4efea55ab4f643a0a9dc9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py
@@ -0,0 +1,140 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" DistilBERT model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class DistilBertConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`DistilBertModel`] or a [`TFDistilBertModel`]. It
+ is used to instantiate a DistilBERT model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the DistilBERT
+ [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the DistilBERT model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`DistilBertModel`] or [`TFDistilBertModel`].
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
+ Whether to use sinusoidal positional embeddings.
+ n_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer encoder.
+ n_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ dim (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ hidden_dim (`int`, *optional*, defaults to 3072):
+ The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ qa_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probabilities used in the question answering model [`DistilBertForQuestionAnswering`].
+ seq_classif_dropout (`float`, *optional*, defaults to 0.2):
+ The dropout probabilities used in the sequence classification and the multiple choice model
+ [`DistilBertForSequenceClassification`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import DistilBertConfig, DistilBertModel
+
+ >>> # Initializing a DistilBERT configuration
+ >>> configuration = DistilBertConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = DistilBertModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "distilbert"
+ attribute_map = {
+ "hidden_size": "dim",
+ "num_attention_heads": "n_heads",
+ "num_hidden_layers": "n_layers",
+ }
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ max_position_embeddings=512,
+ sinusoidal_pos_embds=False,
+ n_layers=6,
+ n_heads=12,
+ dim=768,
+ hidden_dim=4 * 768,
+ dropout=0.1,
+ attention_dropout=0.1,
+ activation="gelu",
+ initializer_range=0.02,
+ qa_dropout=0.1,
+ seq_classif_dropout=0.2,
+ pad_token_id=0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.sinusoidal_pos_embds = sinusoidal_pos_embds
+ self.n_layers = n_layers
+ self.n_heads = n_heads
+ self.dim = dim
+ self.hidden_dim = hidden_dim
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation = activation
+ self.initializer_range = initializer_range
+ self.qa_dropout = qa_dropout
+ self.seq_classif_dropout = seq_classif_dropout
+ super().__init__(**kwargs, pad_token_id=pad_token_id)
+
+
+class DistilBertOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ]
+ )
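+
+
+# A minimal sketch of how the dynamic axes above surface during ONNX export preparation
+# (illustrative; the default task is assumed):
+#
+#     onnx_config = DistilBertOnnxConfig(DistilBertConfig())
+#     dict(onnx_config.inputs)
+#     # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}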
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py b/venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a65e0296116dca625ef343ac027243cca3dc392
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py
@@ -0,0 +1,1384 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
+ part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
+"""
+
+
+import math
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import get_activation
+from ...configuration_utils import PretrainedConfig
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_outputs import (
+ BaseModelOutput,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_distilbert import DistilBertConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+
+from ..deprecated._archive_maps import DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
+
+
+def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(out, modifier_rank=0):
+ if torch.distributed.get_rank() == 0:
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
+ else:
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
+
+
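+# The helper below fills `out` with the classic Transformer sinusoidal table,
+#     PE[pos, 2i]     = sin(pos / 10000 ** (2i / dim))
+#     PE[pos, 2i + 1] = cos(pos / 10000 ** (2i / dim)),
+# and marks the resulting tensor as non-trainable.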
+def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
+ out.requires_grad = False
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
+
+
+class Embeddings(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
+
+ self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
+ self.dropout = nn.Dropout(config.dropout)
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None) -> torch.Tensor:
+ """
+ Parameters:
+ input_ids (torch.Tensor):
+ torch.tensor(bs, max_seq_length) The token ids to embed.
+ input_embeds (*optional*, torch.Tensor):
+ The pre-computed word embeddings. Can only be passed if the input ids are `None`.
+
+
+ Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
+ embeddings)
+ """
+ if input_ids is not None:
+ input_embeds = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
+
+ seq_length = input_embeds.size(1)
+
+        # Using the position ids registered as a buffer in the constructor helps when tracing
+        # the model without passing position ids, and avoids issues similar to issue #5664
+ if hasattr(self, "position_ids"):
+ position_ids = self.position_ids[:, :seq_length]
+ else:
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
+ position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
+
+ position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
+
+ embeddings = input_embeds + position_embeddings # (bs, max_seq_length, dim)
+ embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
+ embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
+ return embeddings
+
+
+class MultiHeadSelfAttention(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.config = config
+
+ self.n_heads = config.n_heads
+ self.dim = config.dim
+ self.dropout = nn.Dropout(p=config.attention_dropout)
+ self.is_causal = False
+
+        # The hidden size must be evenly divisible by the number of attention heads
+ if self.dim % self.n_heads != 0:
+            # Raise an error if the number of heads does not evenly divide the hidden size
+ raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")
+
+ self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+ self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+ self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+ self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+
+ self.pruned_heads: Set[int] = set()
+ self.attention_head_size = self.dim // self.n_heads
+
+ def prune_heads(self, heads: List[int]):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.attention_head_size, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q_lin = prune_linear_layer(self.q_lin, index)
+ self.k_lin = prune_linear_layer(self.k_lin, index)
+ self.v_lin = prune_linear_layer(self.v_lin, index)
+ self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.dim = self.attention_head_size * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ mask: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ """
+ Parameters:
+ query: torch.tensor(bs, seq_length, dim)
+ key: torch.tensor(bs, seq_length, dim)
+ value: torch.tensor(bs, seq_length, dim)
+ mask: torch.tensor(bs, seq_length)
+
+ Returns:
+            context: torch.tensor(bs, seq_length, dim) Contextualized layer.
+            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights. Optional: only returned if
+            `output_attentions=True`.
+ """
+ bs, q_length, dim = query.size()
+ k_length = key.size(1)
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ # assert key.size() == value.size()
+
+ dim_per_head = self.dim // self.n_heads
+
+ mask_reshp = (bs, 1, 1, k_length)
+
+ def shape(x: torch.Tensor) -> torch.Tensor:
+ """separate heads"""
+ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
+
+ def unshape(x: torch.Tensor) -> torch.Tensor:
+ """group heads"""
+ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
+
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
+
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
+ scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
+ mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)
+ scores = scores.masked_fill(
+ mask, torch.tensor(torch.finfo(scores.dtype).min)
+ ) # (bs, n_heads, q_length, k_length)
+
+ weights = nn.functional.softmax(scores, dim=-1) # (bs, n_heads, q_length, k_length)
+ weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ weights = weights * head_mask
+
+ context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
+ context = unshape(context) # (bs, q_length, dim)
+ context = self.out_lin(context) # (bs, q_length, dim)
+
+ if output_attentions:
+ return (context, weights)
+ else:
+ return (context,)
+
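+# The eager attention path above computes standard scaled dot-product attention,
+#     Attention(Q, K, V) = softmax(Q @ K.T / sqrt(dim_per_head)) @ V,
+# with padded key positions filled with the dtype minimum before the softmax.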
+
+class DistilBertFlashAttention2(MultiHeadSelfAttention):
+ """
+ DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module
+    stay untouched. The only required change would be on the forward pass where it needs to correctly call the public
+ API of flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ mask: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ """
+ Parameters:
+ query: torch.tensor(bs, seq_length, dim)
+ key: torch.tensor(bs, seq_length, dim)
+ value: torch.tensor(bs, seq_length, dim)
+ mask: torch.tensor(bs, seq_length)
+
+ Returns:
+            context: torch.tensor(bs, seq_length, dim) Contextualized layer.
+            weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights. Optional: only returned if
+            `output_attentions=True`.
+ """
+ batch_size, q_length, dim = query.size()
+
+ dim_per_head = self.dim // self.n_heads
+
+ def reshape(x: torch.Tensor) -> torch.Tensor:
+ """separate heads"""
+ return x.view(batch_size, -1, self.n_heads, dim_per_head)
+
+ # Flash attention requires the input to have the shape
+        # batch_size x seq_length x num_heads x head_dim
+ query_states = reshape(self.q_lin(query))
+ key_states = reshape(self.k_lin(key))
+ value_states = reshape(self.v_lin(value))
+
+ attn_dropout = self.config.attention_dropout if self.training else 0.0
+
+        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
+        # therefore the input hidden states get silently cast to float32. Hence, we need to
+        # cast them back to the correct dtype just to be sure everything works as expected.
+        # This might slow down training & inference, so it is recommended not to cast the LayerNorms
+        # in fp32. (LlamaRMSNorm handles it correctly)
+
+ if query_states.dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_lin.weight.dtype
+
+ logger.warning_once(
+                f"The input hidden states seem to be silently cast to float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_weights = self._flash_attention_forward(
+ query_states, key_states, value_states, mask, q_length, dropout=attn_dropout
+ )
+
+ attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
+ attn_output = self.out_lin(attn_weights_reshaped)
+
+ if output_attentions:
+ return (attn_output, attn_weights)
+ else:
+ return (attn_output,)
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward with causal=True->causal=False
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+        it first unpads the input, then computes the attention scores, and finally pads the attention output back.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->n_heads
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.n_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class FFN(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.dropout = nn.Dropout(p=config.dropout)
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
+ self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
+ self.activation = get_activation(config.activation)
+
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
+ return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
+
+ def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
+ x = self.lin1(input)
+ x = self.activation(x)
+ x = self.lin2(x)
+ x = self.dropout(x)
+ return x
+
+
+DISTILBERT_ATTENTION_CLASSES = {
+ "eager": MultiHeadSelfAttention,
+ "flash_attention_2": DistilBertFlashAttention2,
+}
+
+
+class TransformerBlock(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+
+        # The hidden size must be evenly divisible by the number of attention heads
+ if config.dim % config.n_heads != 0:
+ raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")
+
+ self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
+ self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
+
+ self.ffn = FFN(config)
+ self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ attn_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ """
+ Parameters:
+ x: torch.tensor(bs, seq_length, dim)
+ attn_mask: torch.tensor(bs, seq_length)
+
+ Returns:
+            ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
+            sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights. Optional: only
+            returned if `output_attentions=True`.
+ """
+ # Self-Attention
+ sa_output = self.attention(
+ query=x,
+ key=x,
+ value=x,
+ mask=attn_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ if output_attentions:
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
+ else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
+ if type(sa_output) != tuple:
+ raise TypeError(f"sa_output must be a tuple but it is {type(sa_output)} type")
+
+ sa_output = sa_output[0]
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
+
+ # Feed Forward Network
+ ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
+ ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
+
+ output = (ffn_output,)
+ if output_attentions:
+ output = (sa_weights,) + output
+ return output
+
+
+class Transformer(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.n_layers = config.n_layers
+ self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ attn_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: # docstyle-ignore
+ """
+ Parameters:
+ x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
+ attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
+
+ Returns:
+            hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top) layer
+            all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
+ Tuple of length n_layers with the hidden states from each layer.
+ Optional: only if output_hidden_states=True
+ all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
+ Tuple of length n_layers with the attention weights from each layer
+ Optional: only if output_attentions=True
+ """
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_state = x
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_state,
+ attn_mask,
+ head_mask[i],
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_state,
+ attn_mask,
+ head_mask[i],
+ output_attentions,
+ )
+
+ hidden_state = layer_outputs[-1]
+
+ if output_attentions:
+ if len(layer_outputs) != 2:
+ raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(layer_outputs)}")
+
+ attentions = layer_outputs[0]
+ all_attentions = all_attentions + (attentions,)
+ else:
+ if len(layer_outputs) != 1:
+ raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(layer_outputs)}")
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
+class DistilBertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DistilBertConfig
+ load_tf_weights = None
+ base_model_prefix = "distilbert"
+ supports_gradient_checkpointing = True
+ _supports_flash_attn_2 = True
+
+ def _init_weights(self, module: nn.Module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, Embeddings) and self.config.sinusoidal_pos_embds:
+ create_sinusoidal_embeddings(
+ self.config.max_position_embeddings, self.config.dim, module.position_embeddings.weight
+ )
+
+
+DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertModel(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.embeddings = Embeddings(config) # Embeddings
+ self.transformer = Transformer(config) # Encoder
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.embeddings.position_embeddings
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+                The new size of the position embedding matrix. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
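+
+        Example (a minimal sketch, assuming the `distilbert-base-uncased` checkpoint):
+
+        ```python
+        >>> from transformers import DistilBertModel
+
+        >>> model = DistilBertModel.from_pretrained("distilbert-base-uncased")
+        >>> model.resize_position_embeddings(1024)
+        >>> model.config.max_position_embeddings
+        1024
+        ```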
+ """
+ num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings
+
+ # no resizing needs to be done if the length stays the same
+ if num_position_embeds_diff == 0:
+ return
+
+ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
+ self.config.max_position_embeddings = new_num_position_embeddings
+
+ old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()
+
+ self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)
+
+ if self.config.sinusoidal_pos_embds:
+ create_sinusoidal_embeddings(
+ n_pos=self.config.max_position_embeddings, dim=self.config.dim, out=self.position_embeddings.weight
+ )
+ else:
+ with torch.no_grad():
+ if num_position_embeds_diff > 0:
+ self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
+ old_position_embeddings_weight
+ )
+ else:
+ self.embeddings.position_embeddings.weight = nn.Parameter(
+ old_position_embeddings_weight[:num_position_embeds_diff]
+ )
+ # move position_embeddings to correct device
+ self.embeddings.position_embeddings.to(self.device)
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, new_embeddings: nn.Embedding):
+ self.embeddings.word_embeddings = new_embeddings
+
+    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+        base class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.transformer.layer[layer].attention.prune_heads(heads)
+
+    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embeddings = self.embeddings(input_ids, inputs_embeds) # (bs, seq_length, dim)
+
+ if self._use_flash_attention_2:
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ else:
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)
+
+ return self.transformer(
+ x=embeddings,
+ attn_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
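+# A minimal usage sketch (illustrative), using the `_CHECKPOINT_FOR_DOC` checkpoint:
+#
+#     from transformers import AutoTokenizer
+#
+#     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+#     model = DistilBertModel.from_pretrained("distilbert-base-uncased")
+#     inputs = tokenizer("Hello, world!", return_tensors="pt")
+#     last_hidden_state = model(**inputs).last_hidden_state  # (1, seq_length, dim)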
+
+@add_start_docstrings(
+ """DistilBert Model with a `masked language modeling` head on top.""",
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForMaskedLM(DistilBertPreTrainedModel):
+ _tied_weights_keys = ["vocab_projector.weight"]
+
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.activation = get_activation(config.activation)
+
+ self.distilbert = DistilBertModel(config)
+ self.vocab_transform = nn.Linear(config.dim, config.dim)
+ self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
+ self.vocab_projector = nn.Linear(config.dim, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.mlm_loss_fct = nn.CrossEntropyLoss()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+                The new size of the position embedding matrix. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ def get_output_embeddings(self) -> nn.Module:
+ return self.vocab_projector
+
+ def set_output_embeddings(self, new_embeddings: nn.Module):
+ self.vocab_projector = new_embeddings
+
+    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ dlbrt_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = dlbrt_output[0] # (bs, seq_length, dim)
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
+ prediction_logits = self.activation(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size)
+
+ mlm_loss = None
+ if labels is not None:
+ mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_logits,) + dlbrt_output[1:]
+ return ((mlm_loss,) + output) if mlm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=mlm_loss,
+ logits=prediction_logits,
+ hidden_states=dlbrt_output.hidden_states,
+ attentions=dlbrt_output.attentions,
+ )
+
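For orientation, here is a minimal usage sketch for the masked-language-modeling head defined above (not part of the patch); the `distilbert-base-uncased` checkpoint and the `[MASK]` handling are assumptions for illustration only.

```python
# Minimal sketch (illustration only): predict a masked token with DistilBertForMaskedLM.
# The "distilbert-base-uncased" checkpoint is assumed to be available.
import torch
from transformers import AutoTokenizer, DistilBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, seq_length, vocab_size)

# Take the most likely token at the [MASK] position.
mask_positions = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_id = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_id))
```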
+
+@add_start_docstrings(
+ """
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.distilbert = DistilBertModel(config)
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
+ self.classifier = nn.Linear(config.dim, config.num_labels)
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+                The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
+ pooled_output = self.dropout(pooled_output) # (bs, dim)
+ logits = self.classifier(pooled_output) # (bs, num_labels)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
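The loss routing in the forward pass above (regression when `num_labels == 1`, cross-entropy for integer labels, BCE for float multi-hot labels) can be exercised with a tiny randomly initialized model; the sketch below is illustrative only, with made-up dimensions.

```python
# Illustrative sketch of the problem_type routing in DistilBertForSequenceClassification,
# using a tiny randomly initialized config (all sizes are made up).
import torch
from transformers import DistilBertConfig, DistilBertForSequenceClassification

config = DistilBertConfig(vocab_size=100, dim=32, hidden_dim=64, n_layers=1, n_heads=4, num_labels=3)
model = DistilBertForSequenceClassification(config)
input_ids = torch.randint(0, 100, (2, 8))

# Integer labels -> "single_label_classification" -> CrossEntropyLoss
out_single = model(input_ids=input_ids, labels=torch.tensor([0, 2]))

# Float multi-hot labels -> "multi_label_classification" -> BCEWithLogitsLoss
config.problem_type = None  # reset so the dtype-based routing runs again
out_multi = model(input_ids=input_ids, labels=torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]))
print(out_single.loss, out_multi.loss)
```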
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.distilbert = DistilBertModel(config)
+ self.qa_outputs = nn.Linear(config.dim, config.num_labels)
+ if config.num_labels != 2:
+ raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")
+
+ self.dropout = nn.Dropout(config.qa_dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+                The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+    @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
+
+ hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim)
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous() # (bs, max_query_len)
+ end_logits = end_logits.squeeze(-1).contiguous() # (bs, max_query_len)
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, splitting adds an extra dimension; squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + distilbert_output[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
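A minimal sketch of how the start/end logits produced above are typically turned into an answer span; the SQuAD-distilled checkpoint name is an assumption for the example.

```python
# Minimal sketch (illustration only): extract an answer span from the QA head above.
# Assumes the public "distilbert-base-uncased-distilled-squad" checkpoint is available.
import torch
from transformers import AutoTokenizer, DistilBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-distilled-squad")
model = DistilBertForQuestionAnswering.from_pretrained("distilbert-base-uncased-distilled-squad")

question, context = "Who proposed DistilBERT?", "DistilBERT was proposed by Hugging Face."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

start = outputs.start_logits.argmax(dim=-1).item()
end = outputs.end_logits.argmax(dim=-1).item()
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))
```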
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForTokenClassification(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.distilbert = DistilBertModel(config)
+ self.dropout = nn.Dropout(config.dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+                The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
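For the token-classification head above, a small sketch of the expected shapes (one label id per input token), using a made-up tiny config rather than a real checkpoint.

```python
# Shape-only sketch for DistilBertForTokenClassification; the model is randomly
# initialized, so the predicted ids are meaningless and only the shapes matter.
import torch
from transformers import DistilBertConfig, DistilBertForTokenClassification

config = DistilBertConfig(vocab_size=100, dim=32, hidden_dim=64, n_layers=1, n_heads=4, num_labels=5)
model = DistilBertForTokenClassification(config)

input_ids = torch.randint(0, 100, (1, 6))
logits = model(input_ids=input_ids).logits  # (1, 6, num_labels)
predicted_ids = logits.argmax(dim=-1)       # (1, 6): one label id per token
print(predicted_ids.shape)
```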
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.distilbert = DistilBertModel(config)
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
+ self.classifier = nn.Linear(config.dim, 1)
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+            new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
+ will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
+ >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> choice0 = "It is eaten with a fork and a knife."
+ >>> choice1 = "It is eaten while held in the hand."
+ >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
+
+ >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
+ >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
+
+ >>> # the linear classifier still needs to be trained
+ >>> loss = outputs.loss
+ >>> logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
+ pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
+ pooled_output = self.dropout(pooled_output) # (bs * num_choices, dim)
+ logits = self.classifier(pooled_output) # (bs * num_choices, 1)
+
+ reshaped_logits = logits.view(-1, num_choices) # (bs, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
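The multiple-choice head above flattens the choice dimension into the batch before calling the encoder and folds the per-choice scores back afterwards; a shape-only sketch with dummy tensors:

```python
# Shape-only sketch of the reshaping done in DistilBertForMultipleChoice.forward.
import torch

bs, num_choices, seq_len = 2, 4, 16
input_ids = torch.randint(0, 100, (bs, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))  # (bs * num_choices, seq_len)
# ... encoder + classifier produce one logit per flattened example ...
logits = torch.randn(bs * num_choices, 1)
reshaped_logits = logits.view(-1, num_choices)            # (bs, num_choices)
print(flat_input_ids.shape, reshaped_logits.shape)
```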
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py b/venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3c48c077adc529a1e942fcbce1999c2d0f8d524
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py
@@ -0,0 +1,895 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from typing import Callable, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxMaskedLMOutput,
+ FlaxMultipleChoiceModelOutput,
+ FlaxQuestionAnsweringModelOutput,
+ FlaxSequenceClassifierOutput,
+ FlaxTokenClassifierOutput,
+)
+from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_distilbert import DistilBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+
+FLAX_DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
+
+ This model is also a
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
+    a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
+ behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`numpy.ndarray` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def get_angles(pos, i, d_model):
+ angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
+ return pos * angle_rates
+
+
+def positional_encoding(position, d_model):
+ # create the sinusoidal pattern for the positional encoding
+ angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
+
+ # apply sin to even indices in the array; 2i
+ angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
+
+ # apply cos to odd indices in the array; 2i+1
+ angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
+
+ pos_encoding = angle_rads[np.newaxis, ...]
+
+ return jnp.array(pos_encoding)
+
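As a quick check of the sinusoidal table built by the helpers above, the following sketch (assuming `get_angles` and `positional_encoding` from this file are in scope) verifies the shape and the sin/cos interleaving:

```python
# Shape/interleaving check for the sinusoidal positional-encoding table
# (assumes get_angles and positional_encoding defined above are in scope).
import numpy as np

table = positional_encoding(position=8, d_model=4)
print(table.shape)  # (1, 8, 4)

angles = get_angles(np.arange(8)[:, np.newaxis], np.arange(4)[np.newaxis, :], 4)
assert np.allclose(np.asarray(table)[0, :, 0::2], np.sin(angles[:, 0::2]))  # even dims: sin
assert np.allclose(np.asarray(table)[0, :, 1::2], np.cos(angles[:, 1::2]))  # odd dims: cos
```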
+
+class FlaxEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.word_embeddings = nn.Embed(
+ self.config.vocab_size,
+ self.config.dim,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ if not self.config.sinusoidal_pos_embds:
+ self.position_embeddings = nn.Embed(
+ self.config.max_position_embeddings,
+ self.config.dim,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ else:
+ self.pos_encoding = positional_encoding(self.config.max_position_embeddings, self.config.dim)
+ self.LayerNorm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.dropout)
+
+ def __call__(self, input_ids, deterministic: bool = True):
+ # Embed
+ batch_size, seq_length = input_ids.shape
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
+ if not self.config.sinusoidal_pos_embds:
+ position_ids = jnp.arange(seq_length).astype("i4")
+ position_ids = jnp.broadcast_to(position_ids, shape=(batch_size, seq_length))
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
+ else:
+ position_embeds = self.pos_encoding[:, :seq_length, :]
+ # explicitly cast the positions here, since self.embed_positions are not registered as parameters
+ position_embeds = position_embeds.astype(inputs_embeds.dtype)
+
+ # Sum all embeddings
+ hidden_states = inputs_embeds + position_embeds
+
+ # Layer Norm
+ hidden_states = self.LayerNorm(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxMultiHeadSelfAttention(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.n_heads = self.config.n_heads
+ self.dim = self.config.dim
+ self.dropout = nn.Dropout(rate=self.config.attention_dropout)
+
+ if not (self.dim % self.n_heads == 0):
+ raise ValueError(f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}")
+
+ self.q_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.k_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.v_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.out_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ def __call__(
+ self,
+ query,
+ key,
+ value,
+ mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ ):
+ bs, q_len, dim = query.shape
+ k_len = key.shape[1]
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ # assert key.size() == value.size()
+
+ dim_per_head = self.dim // self.n_heads
+
+ mask_reshp = (bs, 1, 1, k_len)
+
+ def shape(x):
+ """separate heads"""
+ return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3)
+
+ def unshape(x):
+ """group heads"""
+ return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head)
+
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_len, dim_per_head)
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_len, dim_per_head)
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_len, dim_per_head)
+
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_len, dim_per_head)
+ scores = jnp.matmul(q, k.transpose(0, 1, 3, 2)) # (bs, n_heads, q_len, k_len)
+ mask = jnp.reshape(mask, mask_reshp)
+
+ mask = mask.astype(scores.dtype)
+ scores = scores - 1e30 * (1.0 - mask)
+
+ weights = nn.softmax(scores, axis=-1) # (bs, n_heads, q_len, k_len)
+ weights = self.dropout(weights, deterministic=deterministic)
+
+ context = jnp.matmul(weights, v) # (bs, n_heads, q_len, dim_per_head)
+ context = unshape(context) # (bs, q_len, dim)
+ context = self.out_lin(context) # (bs, q_len, dim)
+
+ if output_attentions:
+ return (context, weights)
+ else:
+ return (context,)
+
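The attention module above masks padded positions additively rather than with boolean indexing: a large negative value is subtracted where the mask is 0, so softmax drives those weights to roughly zero. A plain numpy sketch of the same trick:

```python
# Additive attention masking: padded positions receive a huge negative score,
# so softmax assigns them ~0 probability mass.
import numpy as np

scores = np.array([[2.0, 1.0, 0.5]])  # (1, k_len) raw attention scores
mask = np.array([[1.0, 1.0, 0.0]])    # 1 = real token, 0 = padding
masked = scores - 1e30 * (1.0 - mask)

weights = np.exp(masked - masked.max(axis=-1, keepdims=True))
weights = weights / weights.sum(axis=-1, keepdims=True)
print(weights)  # the padded (third) position gets ~0 weight
```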
+
+class FlaxFFN(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dropout = nn.Dropout(rate=self.config.dropout)
+ self.chunk_size_feed_forward = self.config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.lin1 = nn.Dense(
+ self.config.hidden_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.lin2 = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ self.activation = ACT2FN[self.config.activation]
+
+ def __call__(self, hidden_states, deterministic: bool = True):
+ hidden_states = self.lin1(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.lin2(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxTransformerBlock(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ assert (
+ self.config.dim % self.config.n_heads == 0
+ ), f"Hidden size {self.config.dim} not dividable by number of heads {self.config.n_heads}"
+
+ self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype)
+ self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+
+ self.ffn = FlaxFFN(self.config, dtype=self.dtype)
+ self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attn_mask,
+ output_attentions: bool = False,
+ deterministic: bool = True,
+ ):
+ # Self-Attention
+ sa_output = self.attention(
+ query=hidden_states,
+ key=hidden_states,
+ value=hidden_states,
+ mask=attn_mask,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ if output_attentions:
+ sa_output, sa_weights = sa_output
+ else:
+ assert type(sa_output) == tuple
+ sa_output = sa_output[0]
+ sa_output = self.sa_layer_norm(sa_output + hidden_states)
+
+ # Feed Forward Network
+ ffn_output = self.ffn(sa_output, deterministic=deterministic)
+ ffn_output = self.output_layer_norm(ffn_output + sa_output)
+ output = (ffn_output,)
+ if output_attentions:
+ output = (sa_weights,) + output
+ return output
+
+
+class FlaxTransformer(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ return_dict: bool = False,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for layer_module in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ attn_mask=attention_mask,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = layer_outputs[-1]
+
+ if output_attentions:
+ assert len(layer_outputs) == 2
+ attentions = layer_outputs[0]
+ all_attentions = all_attentions + (attentions,)
+ else:
+ assert len(layer_outputs) == 1
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None)
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class FlaxTransformerEncoder(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layer = FlaxTransformer(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ return_dict: bool = False,
+ ):
+ return self.layer(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ return_dict=return_dict,
+ )
+
+
+class FlaxDistilBertLMDecoder(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
+
+ def __call__(self, inputs, kernel):
+ inputs = jnp.asarray(inputs, self.dtype)
+ kernel = jnp.asarray(kernel, self.dtype)
+ y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
+ bias = jnp.asarray(self.bias, self.dtype)
+ y = y + bias
+ return y
+
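The `lax.dot_general` call in `FlaxDistilBertLMDecoder` with dimension numbers `(((inputs.ndim - 1,), (0,)), ((), ()))` contracts the last axis of the inputs with the first axis of the kernel, i.e. it is an ordinary matmul; this is what lets the transposed word-embedding matrix serve as the output projection. A small sketch verifying the equivalence:

```python
# dot_general with these dimension numbers == plain matmul over the last/first axes.
import jax.numpy as jnp
from jax import lax

inputs = jnp.ones((2, 5, 8))   # (bs, seq_len, dim)
kernel = jnp.ones((8, 30))     # (dim, vocab_size), e.g. the embedding matrix transposed

y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
assert jnp.allclose(y, inputs @ kernel)
print(y.shape)  # (2, 5, 30)
```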
+
+class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DistilBertConfig
+ base_model_prefix = "distilbert"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: DistilBertConfig,
+ input_shape: Tuple = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ attention_mask = jnp.ones_like(input_ids)
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ head_mask=None,
+ params: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ return self.module.apply(
+ {"params": params or self.params},
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ not train,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ rngs=rngs,
+ )
+
+
+class FlaxDistilBertModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype)
+ self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ input_embeds = self.embeddings(input_ids, deterministic=deterministic)
+ return self.transformer(
+ hidden_states=input_embeds,
+ attention_mask=attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+@add_start_docstrings(
+ "The bare DistilBert Model transformer outputting raw hidden-states without any specific head on top.",
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertModule
+
+
+append_call_sample_docstring(FlaxDistilBertModel, _CHECKPOINT_FOR_DOC, None, _CONFIG_FOR_DOC)
+
+
+class FlaxDistilBertForMaskedLMModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype)
+ self.vocab_transform = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+ if self.config.tie_word_embeddings:
+ self.vocab_projector = FlaxDistilBertLMDecoder(
+ self.config,
+ dtype=self.dtype,
+ )
+ else:
+ self.vocab_projector = nn.Dense(
+ self.config.vocab_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ dlbrt_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ return_dict=return_dict,
+ )
+ hidden_states = dlbrt_output[0]
+ prediction_logits = self.vocab_transform(hidden_states)
+ prediction_logits = ACT2FN[self.config.activation](prediction_logits)
+ prediction_logits = self.vocab_layer_norm(prediction_logits)
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.distilbert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T)
+ else:
+ prediction_logits = self.vocab_projector(prediction_logits)
+
+ if not return_dict:
+ output = (prediction_logits,) + dlbrt_output[1:]
+ return output
+
+ return FlaxMaskedLMOutput(
+ logits=prediction_logits,
+ hidden_states=dlbrt_output.hidden_states,
+ attentions=dlbrt_output.attentions,
+ )
+
+
+@add_start_docstrings("""DistilBert Model with a `language modeling` head on top.""", FLAX_DISTILBERT_START_DOCSTRING)
+class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForMaskedLMModule
+
+
+append_call_sample_docstring(FlaxDistilBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
+
+
+class FlaxDistilBertForSequenceClassificationModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.pre_classifier = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
+ self.classifier = nn.Dense(
+ self.config.num_labels,
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ # Model
+ distilbert_output = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = ACT2FN["relu"](pooled_output)
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+        logits = self.classifier(pooled_output)  # (bs, num_labels)
+
+ if not return_dict:
+ return (logits,) + distilbert_output[1:]
+
+ return FlaxSequenceClassifierOutput(
+ logits=logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForSequenceClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxDistilBertForSequenceClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxSequenceClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxDistilBertForMultipleChoiceModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.pre_classifier = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
+ self.classifier = nn.Dense(
+ 1,
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1]
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
+
+ # Model
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_state = outputs[0]
+ pooled_output = hidden_state[:, 0]
+ pooled_output = self.pre_classifier(pooled_output)
+ pooled_output = ACT2FN["relu"](pooled_output)
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output)
+
+ reshaped_logits = logits.reshape(-1, num_choices)
+
+ if not return_dict:
+ return (reshaped_logits,) + outputs[2:]
+
+ return FlaxMultipleChoiceModelOutput(
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForMultipleChoiceModule
+
+
+overwrite_call_docstring(
+ FlaxDistilBertForMultipleChoice, DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+)
+append_call_sample_docstring(
+ FlaxDistilBertForMultipleChoice,
+ _CHECKPOINT_FOR_DOC,
+ FlaxMultipleChoiceModelOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxDistilBertForTokenClassificationModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.dropout)
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ # Model
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ logits = self.classifier(hidden_states)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxTokenClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForTokenClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxDistilBertForTokenClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxTokenClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxDistilBertForQuestionAnsweringModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
+ assert self.config.num_labels == 2
+ self.dropout = nn.Dropout(rate=self.config.qa_dropout)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Model
+ distilbert_output = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = distilbert_output[0]
+
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ logits = self.qa_outputs(hidden_states)
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = end_logits.squeeze(-1)
+
+ if not return_dict:
+ return (start_logits, end_logits) + distilbert_output[1:]
+
+ return FlaxQuestionAnsweringModelOutput(
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForQuestionAnsweringModule
+
+
+append_call_sample_docstring(
+ FlaxDistilBertForQuestionAnswering,
+ _CHECKPOINT_FOR_DOC,
+ FlaxQuestionAnsweringModelOutput,
+ _CONFIG_FOR_DOC,
+)
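A minimal end-to-end sketch for the Flax encoder defined in this file (not part of the patch); the `distilbert-base-uncased` checkpoint is an assumption for illustration:

```python
# Minimal sketch (illustration only): run the bare Flax DistilBERT encoder.
from transformers import AutoTokenizer, FlaxDistilBertModel

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_length, 768)
```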
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py b/venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..c41deac3f2e57e53572e99fefaa4b7e26eb4309f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py
@@ -0,0 +1,1139 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ TF 2.0 DistilBERT model
+"""
+
+
+from __future__ import annotations
+
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_distilbert import DistilBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+
+from ..deprecated._archive_maps import TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TFEmbeddings(keras.layers.Layer):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.dim = config.dim
+ self.initializer_range = config.initializer_range
+ self.max_position_embeddings = config.max_position_embeddings
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.dropout)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.dim],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.dim],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.dim])
+
+ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ final_embeddings = inputs_embeds + position_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+class TFMultiHeadSelfAttention(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.n_heads = config.n_heads
+ self.dim = config.dim
+ self.dropout = keras.layers.Dropout(config.attention_dropout)
+ self.output_attentions = config.output_attentions
+
+        assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not divisible by number of heads {self.n_heads}"
+
+ self.q_lin = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin"
+ )
+ self.k_lin = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin"
+ )
+ self.v_lin = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin"
+ )
+ self.out_lin = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin"
+ )
+
+ self.pruned_heads = set()
+ self.config = config
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(self, query, key, value, mask, head_mask, output_attentions, training=False):
+ """
+ Parameters:
+ query: tf.Tensor(bs, seq_length, dim)
+ key: tf.Tensor(bs, seq_length, dim)
+ value: tf.Tensor(bs, seq_length, dim)
+ mask: tf.Tensor(bs, seq_length)
+
+        Returns:
+            context: tf.Tensor(bs, seq_length, dim) Contextualized layer.
+            weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights. Only returned if
+            `output_attentions=True`.
+ """
+ bs, q_length, dim = shape_list(query)
+ k_length = shape_list(key)[1]
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ # assert key.size() == value.size()
+ dim_per_head = int(self.dim / self.n_heads)
+ dim_per_head = tf.cast(dim_per_head, dtype=tf.int32)
+ mask_reshape = [bs, 1, 1, k_length]
+
+ def shape(x):
+ """separate heads"""
+ return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
+
+ def unshape(x):
+ """group heads"""
+ return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
+
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
+ q = tf.cast(q, dtype=tf.float32)
+ q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32)))
+ k = tf.cast(k, dtype=q.dtype)
+ scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, q_length, k_length)
+ mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
+ # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length)
+
+ mask = tf.cast(mask, dtype=scores.dtype)
+ scores = scores - 1e30 * (1.0 - mask)
+ weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
+ weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ weights = weights * head_mask
+
+ context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
+ context = unshape(context) # (bs, q_length, dim)
+ context = self.out_lin(context) # (bs, q_length, dim)
+
+ if output_attentions:
+ return (context, weights)
+ else:
+ return (context,)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_lin", None) is not None:
+ with tf.name_scope(self.q_lin.name):
+ self.q_lin.build([None, None, self.config.dim])
+ if getattr(self, "k_lin", None) is not None:
+ with tf.name_scope(self.k_lin.name):
+ self.k_lin.build([None, None, self.config.dim])
+ if getattr(self, "v_lin", None) is not None:
+ with tf.name_scope(self.v_lin.name):
+ self.v_lin.build([None, None, self.config.dim])
+ if getattr(self, "out_lin", None) is not None:
+ with tf.name_scope(self.out_lin.name):
+ self.out_lin.build([None, None, self.config.dim])
+
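The `shape()`/`unshape()` helpers above split the hidden dimension into attention heads via reshape + transpose and merge them back with the inverse operations; a numpy sketch showing the round trip is lossless:

```python
# Head split/merge as reshape + transpose, and its exact inverse.
import numpy as np

bs, seq_len, n_heads, dim_per_head = 2, 5, 4, 8
x = np.random.rand(bs, seq_len, n_heads * dim_per_head)

split = x.reshape(bs, -1, n_heads, dim_per_head).transpose(0, 2, 1, 3)       # (bs, n_heads, seq_len, dim_per_head)
merged = split.transpose(0, 2, 1, 3).reshape(bs, -1, n_heads * dim_per_head)  # (bs, seq_len, dim)
assert np.allclose(x, merged)
```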
+
+class TFFFN(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.lin1 = keras.layers.Dense(
+ config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1"
+ )
+ self.lin2 = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2"
+ )
+ self.activation = get_tf_activation(config.activation)
+ self.config = config
+
+ def call(self, input, training=False):
+ x = self.lin1(input)
+ x = self.activation(x)
+ x = self.lin2(x)
+ x = self.dropout(x, training=training)
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "lin1", None) is not None:
+ with tf.name_scope(self.lin1.name):
+ self.lin1.build([None, None, self.config.dim])
+ if getattr(self, "lin2", None) is not None:
+ with tf.name_scope(self.lin2.name):
+ self.lin2.build([None, None, self.config.hidden_dim])
+
+
+class TFTransformerBlock(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.n_heads = config.n_heads
+ self.dim = config.dim
+ self.hidden_dim = config.hidden_dim
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation = config.activation
+ self.output_attentions = config.output_attentions
+
+ assert (
+ config.dim % config.n_heads == 0
+ ), f"Hidden size {config.dim} not dividable by number of heads {config.n_heads}"
+
+ self.attention = TFMultiHeadSelfAttention(config, name="attention")
+ self.sa_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")
+
+ self.ffn = TFFFN(config, name="ffn")
+ self.output_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
+ self.config = config
+
+ def call(self, x, attn_mask, head_mask, output_attentions, training=False): # removed: src_enc=None, src_len=None
+ """
+ Parameters:
+ x: tf.Tensor(bs, seq_length, dim)
+ attn_mask: tf.Tensor(bs, seq_length)
+
+        Outputs:
+            sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length) The attention weights.
+            ffn_output: tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization.
+ """
+ # Self-Attention
+ sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training)
+ if output_attentions:
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
+        else:
+            # When attentions are not requested, the attention layer still returns a one-element tuple.
+            # assert type(sa_output) == tuple
+            sa_output = sa_output[0]
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
+
+ # Feed Forward Network
+ ffn_output = self.ffn(sa_output, training=training) # (bs, seq_length, dim)
+ ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
+
+ output = (ffn_output,)
+ if output_attentions:
+ output = (sa_weights,) + output
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "sa_layer_norm", None) is not None:
+ with tf.name_scope(self.sa_layer_norm.name):
+ self.sa_layer_norm.build([None, None, self.config.dim])
+ if getattr(self, "ffn", None) is not None:
+ with tf.name_scope(self.ffn.name):
+ self.ffn.build(None)
+ if getattr(self, "output_layer_norm", None) is not None:
+ with tf.name_scope(self.output_layer_norm.name):
+ self.output_layer_norm.build([None, None, self.config.dim])
+
+
+class TFTransformer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.n_layers = config.n_layers
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+
+ self.layer = [TFTransformerBlock(config, name=f"layer_._{i}") for i in range(config.n_layers)]
+
+ def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False):
+ # docstyle-ignore
+ """
+ Parameters:
+ x: tf.Tensor(bs, seq_length, dim) Input sequence embedded.
+ attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence.
+
+ Returns:
+ hidden_state: tf.Tensor(bs, seq_length, dim)
+ Sequence of hidden states in the last (top) layer
+ all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
+ Tuple of length n_layers with the hidden states from each layer.
+ Optional: only if output_hidden_states=True
+ all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
+ Tuple of length n_layers with the attention weights from each layer
+ Optional: only if output_attentions=True
+ """
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_state = x
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training)
+ hidden_state = layer_outputs[-1]
+
+ if output_attentions:
+ assert len(layer_outputs) == 2
+ attentions = layer_outputs[0]
+ all_attentions = all_attentions + (attentions,)
+ else:
+ assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1"
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
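+# Illustrative sketch of the `TFTransformer` stack defined above (not part of the
+# original code): it runs directly on pre-computed embeddings. `head_mask` is a
+# per-layer list, so `[None] * n_layers` keeps every attention head active.
+def _transformer_stack_sketch():
+    config = DistilBertConfig(dim=8, hidden_dim=16, n_heads=2, n_layers=2, dropout=0.0, attention_dropout=0.0)
+    encoder = TFTransformer(config, name="transformer_sketch")
+    embeddings = tf.random.normal((1, 4, config.dim))  # (bs, seq_length, dim)
+    attn_mask = tf.ones((1, 4))  # 1 = attend, 0 = padding
+    output = encoder(embeddings, attn_mask, [None] * config.n_layers, False, True, True, training=False)
+    return output.last_hidden_state, output.hidden_states  # (1, 4, 8) and a tuple of 3 tensors
+
+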
+@keras_serializable
+class TFDistilBertMainLayer(keras.layers.Layer):
+ config_class = DistilBertConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.num_hidden_layers = config.num_hidden_layers
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+
+ self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings
+ self.transformer = TFTransformer(config, name="transformer") # Encoder
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = value.shape[0]
+
+ def _prune_heads(self, heads_to_prune):
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ head_mask=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.ones(input_shape) # (bs, seq_length)
+
+ attention_mask = tf.cast(attention_mask, dtype=tf.float32)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.num_hidden_layers
+
+ embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) # (bs, seq_length, dim)
+ tfmr_output = self.transformer(
+ embedding_output,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=training,
+ )
+
+ return tfmr_output # last-layer hidden-state, (all hidden_states), (all attentions)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+
+
+# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
+class TFDistilBertPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DistilBertConfig
+ base_model_prefix = "distilbert"
+
+
+DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+    heads, etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
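+
+    As an illustrative sketch (the checkpoint name below is only an example), the same forward pass can be written
+    in any of the three styles:
+
+    ```python
+    >>> from transformers import AutoTokenizer, TFDistilBertModel
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+    >>> model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
+    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+
+    >>> outputs = model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])  # keyword arguments
+    >>> outputs = model([inputs["input_ids"], inputs["attention_mask"]])  # list in the documented order
+    >>> outputs = model({"input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"]})  # dict
+    ```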
+
+
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertModel(TFDistilBertPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert") # Embeddings
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ outputs = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+
+
+class TFDistilBertLMHead(keras.layers.Layer):
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.dim = config.dim
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.input_embeddings = input_embeddings
+
+ def build(self, input_shape):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ super().build(input_shape)
+
+ def get_output_embeddings(self):
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
+ seq_length = shape_list(tensor=hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.dim])
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
+
+@add_start_docstrings(
+ """DistilBert Model with a `masked language modeling` head on top.""",
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.config = config
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.vocab_transform = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform"
+ )
+ self.act = get_tf_activation(config.activation)
+ self.vocab_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
+ self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")
+
+ def get_lm_head(self):
+ return self.vocab_projector
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.vocab_projector.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
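+
+        Example (an illustrative sketch; the checkpoint name is only an example):
+
+        ```python
+        >>> import tensorflow as tf
+        >>> from transformers import AutoTokenizer, TFDistilBertForMaskedLM
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+        >>> model = TFDistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
+
+        >>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
+        >>> logits = model(**inputs).logits
+
+        >>> # index of the [MASK] token and its most likely replacement
+        >>> mask_index = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
+        >>> tokenizer.decode([int(tf.argmax(logits[0, mask_index]))])
+        ```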
+ """
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = distilbert_output[0] # (bs, seq_length, dim)
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
+ prediction_logits = self.act(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_projector(prediction_logits)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits)
+
+ if not return_dict:
+ output = (prediction_logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "vocab_transform", None) is not None:
+ with tf.name_scope(self.vocab_transform.name):
+ self.vocab_transform.build([None, None, self.config.dim])
+ if getattr(self, "vocab_layer_norm", None) is not None:
+ with tf.name_scope(self.vocab_layer_norm.name):
+ self.vocab_layer_norm.build([None, None, self.config.dim])
+ if getattr(self, "vocab_projector", None) is not None:
+ with tf.name_scope(self.vocab_projector.name):
+ self.vocab_projector.build(None)
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.pre_classifier = keras.layers.Dense(
+ config.dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="relu",
+ name="pre_classifier",
+ )
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
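+
+        Example (an illustrative sketch; the checkpoint name is only an example and its classification head is
+        newly initialized here):
+
+        ```python
+        >>> import tensorflow as tf
+        >>> from transformers import AutoTokenizer, TFDistilBertForSequenceClassification
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+        >>> model = TFDistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)
+
+        >>> inputs = tokenizer("This movie was great!", return_tensors="tf")
+        >>> logits = model(**inputs).logits  # (batch_size, num_labels)
+        >>> predicted_class = int(tf.argmax(logits, axis=-1)[0])
+        ```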
+ """
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
+        logits = self.classifier(pooled_output)  # (bs, num_labels)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "pre_classifier", None) is not None:
+ with tf.name_scope(self.pre_classifier.name):
+ self.pre_classifier.build([None, None, self.config.dim])
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.dim])
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
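+
+        Example (an illustrative sketch; the checkpoint name is only an example and its token-classification head
+        is newly initialized here):
+
+        ```python
+        >>> import tensorflow as tf
+        >>> from transformers import AutoTokenizer, TFDistilBertForTokenClassification
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+        >>> model = TFDistilBertForTokenClassification.from_pretrained("distilbert-base-uncased", num_labels=9)
+
+        >>> inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
+        >>> logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)
+        >>> predicted_ids = tf.argmax(logits, axis=-1)  # one predicted label id per token
+        ```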
+ """
+ outputs = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(sequence_output)
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
+ self.pre_classifier = keras.layers.Dense(
+ config.dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="relu",
+ name="pre_classifier",
+ )
+ self.classifier = keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
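+
+        Example (an illustrative sketch; the checkpoint name is only an example). Inputs are laid out as
+        `(batch_size, num_choices, sequence_length)` and the head produces one score per choice:
+
+        ```python
+        >>> import tensorflow as tf
+        >>> from transformers import AutoTokenizer, TFDistilBertForMultipleChoice
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+        >>> model = TFDistilBertForMultipleChoice.from_pretrained("distilbert-base-uncased")
+
+        >>> prompt = "The weather today is"
+        >>> choices = ["sunny and warm.", "a prime number."]
+        >>> encoding = tokenizer([prompt, prompt], choices, return_tensors="tf", padding=True)
+        >>> input_ids = tf.expand_dims(encoding["input_ids"], 0)  # (1, num_choices, seq_length)
+        >>> attention_mask = tf.expand_dims(encoding["attention_mask"], 0)
+        >>> logits = model(input_ids=input_ids, attention_mask=attention_mask).logits  # (1, num_choices)
+        ```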
+ """
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+ distilbert_output = self.distilbert(
+ flat_input_ids,
+ flat_attention_mask,
+ head_mask,
+ flat_inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "pre_classifier", None) is not None:
+ with tf.name_scope(self.pre_classifier.name):
+ self.pre_classifier.build([None, None, self.config.dim])
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.dim])
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.qa_outputs = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2"
+ self.dropout = keras.layers.Dropout(config.qa_dropout)
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
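+
+        Example (an illustrative sketch; the checkpoint name is only an example and its QA head is newly
+        initialized here, so the extracted span is not meaningful):
+
+        ```python
+        >>> import tensorflow as tf
+        >>> from transformers import AutoTokenizer, TFDistilBertForQuestionAnswering
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+        >>> model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert-base-uncased")
+
+        >>> question, context = "Where is the Eiffel Tower?", "The Eiffel Tower is located in Paris."
+        >>> inputs = tokenizer(question, context, return_tensors="tf")
+        >>> outputs = model(**inputs)
+
+        >>> start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
+        >>> end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
+        >>> answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
+        ```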
+ """
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
+ hidden_states = self.dropout(hidden_states, training=training) # (bs, max_query_len, dim)
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.dim])
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py b/venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff8854ba3dcf893a1a38d9491b2aa148a64057ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py
@@ -0,0 +1,514 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for DistilBERT."""
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+class DistilBertTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a DistilBERT tokenizer. Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = DistilBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
+ def vocab_size(self):
+ return len(self.vocab)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
+ def _tokenize(self, text, split_special_tokens=False):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
+ ):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
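+
+        Example (illustrative; with the `distilbert-base-uncased` vocabulary the `[CLS]`/`[SEP]` IDs are 101/102):
+
+        ```python
+        >>> tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
+        >>> tokenizer.build_inputs_with_special_tokens([7592, 2088])
+        [101, 7592, 2088, 102]
+        ```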
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*)
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+        # words in the English Wikipedia).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+        For example, `input = "unaffable"` will return the output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
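+
+
+# Illustrative sketch (not part of the original code): `WordpieceTokenizer` can be exercised
+# on its own with a toy vocabulary to see the greedy longest-match-first behaviour described
+# in its docstring.
+def _wordpiece_sketch():
+    toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
+    tokenizer = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")
+    return tokenizer.tokenize("unaffable")  # ["un", "##aff", "##able"]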
diff --git a/venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py b/venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1d69a27d67c081301adb22b263928eb02f4dd84
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py
@@ -0,0 +1,176 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for DistilBERT."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_distilbert import DistilBertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+
+class DistilBertTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
+ whitespace characters with the classic space.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = DistilBertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
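+ # Illustrative sketch (hypothetical call, values not from this file): the block above
+ # re-syncs the serialized normalizer with the arguments passed at load time, so e.g.
+ #
+ # tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased", do_lower_case=False)
+ # tok.backend_tokenizer.normalizer  # rebuilt with lowercase=False, even if tokenizer.json said True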
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
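+
+ # Illustrative sketch (toy ids, using the usual [CLS]=101 and [SEP]=102 only as an
+ # example; actual ids come from the loaded vocabulary):
+ #
+ # tok.build_inputs_with_special_tokens([7592, 999])          # -> [101, 7592, 999, 102]
+ # tok.build_inputs_with_special_tokens([7592, 999], [2129])  # -> [101, 7592, 999, 102, 2129, 102]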
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
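+
+ # Illustrative sketch (toy ids): with a first sequence of length 2 and a second of
+ # length 3, [CLS] + tokens + [SEP] get 0s and the second tokens + [SEP] get 1s:
+ #
+ # tok.create_token_type_ids_from_sequences([1, 2])            # -> [0, 0, 0, 0]
+ # tok.create_token_type_ids_from_sequences([1, 2], [3, 4, 5]) # -> [0, 0, 0, 0, 1, 1, 1, 1]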
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba71f1f7c7a9e121cf3bdda9c1604cb5021a8a3b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__init__.py
@@ -0,0 +1,82 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_torch_available,
+)
+
+
+_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
+
+if TYPE_CHECKING:
+ from .configuration_encoder_decoder import EncoderDecoderConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_encoder_decoder import EncoderDecoderModel
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
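+
+# Illustrative note on the lazy-module pattern above: at runtime,
+# `from transformers.models.encoder_decoder import EncoderDecoderModel` only triggers
+# the import of `modeling_encoder_decoder` (and therefore torch) at that point; if
+# torch is unavailable, the name is simply never registered in `_import_structure`,
+# so the lazy module cannot resolve it.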
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fad4eebb7943e9e6d315f349bb833d89c8e5faad
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/configuration_encoder_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/configuration_encoder_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d4da88a1793a898eeb358382c23e507132402b1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/configuration_encoder_decoder.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_encoder_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_encoder_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a828a6a896d7db6146ef5d4b18c1167ff1e22fe
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_encoder_decoder.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_flax_encoder_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_flax_encoder_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a045b34c828344c8ef1879ceafe3484ad8f894e8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_flax_encoder_decoder.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_tf_encoder_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_tf_encoder_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad14d8e315cc23955fa9e9bab34c03b5c8b7557a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/__pycache__/modeling_tf_encoder_decoder.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/configuration_encoder_decoder.py b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/configuration_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c0ae2771e81f16ab1f7e82a69e91f2fa1ad5407
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/configuration_encoder_decoder.py
@@ -0,0 +1,106 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class EncoderDecoderConfig(PretrainedConfig):
+ r"""
+ [`EncoderDecoderConfig`] is the configuration class to store the configuration of an [`EncoderDecoderModel`]. It is
+ used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder
+ configs.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ kwargs (*optional*):
+ Dictionary of keyword arguments. Notably:
+
+ - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
+ the encoder config.
+ - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
+ the decoder config.
+
+ Examples:
+
+ ```python
+ >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
+
+ >>> # Initializing a BERT google-bert/bert-base-uncased style configuration
+ >>> config_encoder = BertConfig()
+ >>> config_decoder = BertConfig()
+
+ >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
+
+ >>> # Initializing a Bert2Bert model (with random weights) from the google-bert/bert-base-uncased style configurations
+ >>> model = EncoderDecoderModel(config=config)
+
+ >>> # Accessing the model configuration
+ >>> config_encoder = model.config.encoder
+ >>> config_decoder = model.config.decoder
+ >>> # set decoder config to causal lm
+ >>> config_decoder.is_decoder = True
+ >>> config_decoder.add_cross_attention = True
+
+ >>> # Saving the model, including its configuration
+ >>> model.save_pretrained("my-model")
+
+ >>> # loading model and config from pretrained folder
+ >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model")
+ >>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
+ ```"""
+
+ model_type = "encoder-decoder"
+ is_composition = True
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ assert (
+ "encoder" in kwargs and "decoder" in kwargs
+ ), "Config has to be initialized with encoder and decoder config"
+ encoder_config = kwargs.pop("encoder")
+ encoder_model_type = encoder_config.pop("model_type")
+ decoder_config = kwargs.pop("decoder")
+ decoder_model_type = decoder_config.pop("model_type")
+
+ from ..auto.configuration_auto import AutoConfig
+
+ self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
+ self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
+ self.is_encoder_decoder = True
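+
+ # Illustrative sketch (toy values, not from this file): the constructor above expects
+ # serialized sub-configs, i.e. plain dicts that still carry their "model_type" key:
+ #
+ # cfg = EncoderDecoderConfig(
+ #     encoder={"model_type": "bert", "hidden_size": 768},
+ #     decoder={"model_type": "bert", "hidden_size": 768, "is_decoder": True, "add_cross_attention": True},
+ # )
+ # cfg.encoder, cfg.decoder  # BertConfig instances built via AutoConfig.for_model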
+
+ @classmethod
+ def from_encoder_decoder_configs(
+ cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
+ ) -> PretrainedConfig:
+ r"""
+ Instantiate an [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
+ decoder model configuration.
+
+ Returns:
+ [`EncoderDecoderConfig`]: An instance of a configuration object
+ """
+ logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
+ decoder_config.is_decoder = True
+ decoder_config.add_cross_attention = True
+
+ return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_encoder_decoder.py b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..16248fee64ce593a6d68d309259115abb083aba2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_encoder_decoder.py
@@ -0,0 +1,693 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Classes to support Encoder-Decoder architectures"""
+
+
+import gc
+import inspect
+import os
+import tempfile
+import warnings
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...configuration_utils import PretrainedConfig
+from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from ..auto.configuration_auto import AutoConfig
+from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
+from .configuration_encoder_decoder import EncoderDecoderConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "EncoderDecoderConfig"
+
+DEPRECATION_WARNING = (
+ "Version v4.12.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
+ " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
+ " fine-tuning a model trained with versions anterior to 4.12.0. The decoder_input_ids are now created based on the"
+ " labels, no need to pass them yourself anymore."
+)
+
+ENCODER_DECODER_START_DOCSTRING = r"""
+ This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
+ encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
+ [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]
+ function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
+ generative task, like summarization.
+
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
+ Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.
+
+ After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
+ (see the examples for more information).
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ENCODER_DECODER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
+ right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):
+ This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor
+ of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
+ decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
+ into associated vectors than the model's internal embedding lookup matrix.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
+ ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
+ kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
+
+ - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
+ - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
+"""
+
+
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ if decoder_start_token_id is None:
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
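+
+# Illustrative sketch (toy ids): a worked example of the shift above, assuming
+# pad_token_id=0 and decoder_start_token_id=101:
+#
+#     labels             = [[ 5,  6,    7, -100]]
+#     after the shift    = [[101, 5,    6,    7]]   # prepend start token, drop last position
+#     after masked_fill_ = [[101, 5,    6,    7]]   # nothing left to replace
+#
+#     labels             = [[ 5,  6, -100, -100]]
+#     after the shift    = [[101, 5,    6, -100]]
+#     after masked_fill_ = [[101, 5,    6,    0]]   # remaining -100 becomes pad_token_id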
+
+
+@add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
+class EncoderDecoderModel(PreTrainedModel):
+ r"""
+ [`EncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
+ of the base model classes of the library as encoder and another one as decoder when created with the
+ [`~AutoModel.from_pretrained`] class method for the encoder and the
+ [`~AutoModelForCausalLM.from_pretrained`] class method for the decoder.
+ """
+
+ config_class = EncoderDecoderConfig
+ base_model_prefix = "encoder_decoder"
+ main_input_name = "input_ids"
+ supports_gradient_checkpointing = True
+
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ encoder: Optional[PreTrainedModel] = None,
+ decoder: Optional[PreTrainedModel] = None,
+ ):
+ if config is None and (encoder is None or decoder is None):
+ raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
+ if config is None:
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
+ else:
+ if not isinstance(config, self.config_class):
+ raise ValueError(f"Config: {config} has to be of type {self.config_class}")
+
+ if config.decoder.cross_attention_hidden_size is not None:
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
+ raise ValueError(
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
+ " `config.encoder.hidden_size`."
+ )
+
+ # initialize with config
+ super().__init__(config)
+
+ if encoder is None:
+ from ..auto.modeling_auto import AutoModel
+
+ encoder = AutoModel.from_config(config.encoder)
+
+ if decoder is None:
+ from ..auto.modeling_auto import AutoModelForCausalLM
+
+ decoder = AutoModelForCausalLM.from_config(config.decoder)
+
+ self.encoder = encoder
+ self.decoder = decoder
+
+ if self.encoder.config.to_dict() != self.config.encoder.to_dict():
+ logger.warning(
+ f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
+ f" {self.config.encoder}"
+ )
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
+ logger.warning(
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
+ f" {self.config.decoder}"
+ )
+
+ # make sure that the individual model's config refers to the shared config
+ # so that the updates to the config will be synced
+ self.encoder.config = self.config.encoder
+ self.decoder.config = self.config.decoder
+
+ # encoder outputs might need to be projected to different dimension for decoder
+ if (
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
+ and self.decoder.config.cross_attention_hidden_size is None
+ ):
+ self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
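+ # Illustrative pairing (not from this file): e.g. a 768-dim BERT-base encoder driving
+ # a 1024-dim decoder would get an nn.Linear(768, 1024) projection here.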
+
+ if self.encoder.get_output_embeddings() is not None:
+ raise ValueError(
+ f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
+ )
+
+ decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys())
+ if "encoder_hidden_states" not in decoder_signature:
+ raise ValueError(
+ "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
+ "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
+ )
+
+ # tie encoder, decoder weights if config set accordingly
+ self.tie_weights()
+
+ def tie_weights(self):
+ # tie encoder & decoder if needed
+ if self.config.tie_encoder_decoder:
+ # tie encoder and decoder base model
+ decoder_base_model_prefix = self.decoder.base_model_prefix
+ tied_weights = self._tie_encoder_decoder_weights(
+ self.encoder,
+ self.decoder._modules[decoder_base_model_prefix],
+ self.decoder.base_model_prefix,
+ "encoder",
+ )
+ # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class
+ # attribute, not an instance member; modifying it would modify the entire class,
+ # leading to issues on subsequent calls by different tests or subsequent calls.
+ self._dynamic_tied_weights_keys = tied_weights
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def get_input_embeddings(self):
+ return self.encoder.get_input_embeddings()
+
+ def get_output_embeddings(self):
+ return self.decoder.get_output_embeddings()
+
+ def set_output_embeddings(self, new_embeddings):
+ return self.decoder.set_output_embeddings(new_embeddings)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
+ r"""
+ Example:
+
+ ```python
+ >>> from transformers import EncoderDecoderModel
+
+ >>> model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16")
+ ```"""
+
+ from_tf = kwargs.pop("from_tf", False)
+ if from_tf:
+ from transformers import TFEncoderDecoderModel
+
+ # a workaround to load from tensorflow checkpoint
+ # Using `_tf_model` won't work, because the weight names in the encoder/decoder of `_tf_model` get
+ # extended before saving those components. For example, the name of `_tf_model.encoder.vit` is
+ # `[top model name]/encoder/vit`, but the name of `tf_model.encoder.vit` is `[top model name]/vit`. The
+ # [top model name] is handled (stripped) by the conversion method, and the former case gets an extra `encoder`,
+ # which should not occur when we want to save the components alone.
+ # There was a (very) ugly potential fix, which wasn't integrated into `transformers`: see
+ # https://github.com/huggingface/transformers/pull/13222/commits/dbb3c9de76eee235791d2064094654637c99f36d#r697304245
+ # (the change in `src/transformers/modeling_tf_utils.py`)
+ _tf_model = TFEncoderDecoderModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+ config = _tf_model.config
+
+ # Using `tf_model` instead
+ encoder = _tf_model.encoder.__class__(_tf_model.config.encoder)
+ decoder = _tf_model.decoder.__class__(_tf_model.config.decoder)
+ # Make sure models are built
+ encoder(encoder.dummy_inputs)
+ decoder(decoder.dummy_inputs)
+
+ # Get the variable correspondence between `_tf_model` and `encoder` and `decoder`
+ encoder_variables = {}
+ for v in encoder.trainable_variables + encoder.non_trainable_variables:
+ encoder_variables["/".join(v.name.split("/")[1:])] = v
+ decoder_variables = {}
+ for v in decoder.trainable_variables + decoder.non_trainable_variables:
+ decoder_variables["/".join(v.name.split("/")[1:])] = v
+
+ _encoder_variables = {}
+ for v in _tf_model.encoder.trainable_variables + _tf_model.encoder.non_trainable_variables:
+ _encoder_variables["/".join(v.name.split("/")[2:])] = v
+ _decoder_variables = {}
+ for v in _tf_model.decoder.trainable_variables + _tf_model.decoder.non_trainable_variables:
+ _decoder_variables["/".join(v.name.split("/")[2:])] = v
+
+ # assign weight values to `encoder` and `decoder` from `_tf_model`
+ for name, v in encoder_variables.items():
+ v.assign(_encoder_variables[name])
+ for name, v in decoder_variables.items():
+ v.assign(_decoder_variables[name])
+
+ tf_model = TFEncoderDecoderModel(encoder=encoder, decoder=decoder)
+
+ # Deal with `enc_to_dec_proj`
+ if hasattr(_tf_model, "enc_to_dec_proj"):
+ tf_model(tf_model.dummy_inputs)
+ tf_model.enc_to_dec_proj.kernel.assign(_tf_model.enc_to_dec_proj.kernel)
+ tf_model.enc_to_dec_proj.bias.assign(_tf_model.enc_to_dec_proj.bias)
+
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ encoder_dir = os.path.join(tmpdirname, "encoder")
+ decoder_dir = os.path.join(tmpdirname, "decoder")
+ tf_model.encoder.save_pretrained(encoder_dir)
+ tf_model.decoder.save_pretrained(decoder_dir)
+
+ if hasattr(tf_model, "enc_to_dec_proj"):
+ enc_to_dec_proj_weight = torch.transpose(
+ torch.from_numpy(tf_model.enc_to_dec_proj.kernel.numpy()), 1, 0
+ )
+ enc_to_dec_proj_bias = torch.from_numpy(tf_model.enc_to_dec_proj.bias.numpy())
+
+ del _tf_model
+ del tf_model
+ gc.collect()
+
+ model = EncoderDecoderModel.from_encoder_decoder_pretrained(
+ encoder_dir, decoder_dir, encoder_from_tf=True, decoder_from_tf=True
+ )
+ # This is only for copying some specific attributes of this particular model.
+ model.config = config
+
+ if hasattr(model, "enc_to_dec_proj"):
+ model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight.contiguous()
+ model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias.contiguous()
+
+ return model
+
+ # At the moment fast initialization is not supported for composite models
+ if kwargs.get("_fast_init", False):
+ logger.warning(
+ "Fast initialization is currently not supported for EncoderDecoderModel. "
+ "Falling back to slow initialization..."
+ )
+ kwargs["_fast_init"] = False
+
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+ @classmethod
+ def from_encoder_decoder_pretrained(
+ cls,
+ encoder_pretrained_model_name_or_path: str = None,
+ decoder_pretrained_model_name_or_path: str = None,
+ *model_args,
+ **kwargs,
+ ) -> PreTrainedModel:
+ r"""
+ Instantiate an encoder and a decoder from one or two base classes of the library using pretrained model
+ checkpoints.
+
+
+ The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
+ the model, you need to first set it back in training mode with `model.train()`.
+
+ Params:
+ encoder_pretrained_model_name_or_path (`str`, *optional*):
+ Information necessary to initiate the encoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+ decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+ Information necessary to initiate the decoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
+
+ model_args (remaining positional arguments, *optional*):
+ All remaining positional arguments will be passed to the underlying model's `__init__` method.
+
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+ Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
+ `output_attentions=True`).
+
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+ Example:
+
+ ```python
+ >>> from transformers import EncoderDecoderModel
+
+ >>> # initialize a bert2bert from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased")
+ >>> # saving model after fine-tuning
+ >>> model.save_pretrained("./bert2bert")
+ >>> # load fine-tuned model
+ >>> model = EncoderDecoderModel.from_pretrained("./bert2bert")
+ ```"""
+
+ kwargs_encoder = {
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
+ }
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ # remove encoder, decoder kwargs from kwargs
+ for key in kwargs_encoder.keys():
+ del kwargs["encoder_" + key]
+ for key in kwargs_decoder.keys():
+ del kwargs["decoder_" + key]
+
+ # Load and initialize the encoder and decoder
+ # The distinction between encoder and decoder at the model level is made
+ # by the value of the flag `is_decoder` that we need to set correctly.
+ encoder = kwargs_encoder.pop("model", None)
+ if encoder is None:
+ if encoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_encoder:
+ encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
+ encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
+ )
+
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
+ logger.info(
+ f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
+ "from a decoder model. Cross-attention and casual mask are disabled."
+ )
+ encoder_config.is_decoder = False
+ encoder_config.add_cross_attention = False
+
+ kwargs_encoder["config"] = encoder_config
+
+ encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
+
+ decoder = kwargs_decoder.pop("model", None)
+ if decoder is None:
+ if decoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_decoder:
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
+ )
+
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
+ logger.info(
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
+ )
+ decoder_config.is_decoder = True
+ decoder_config.add_cross_attention = True
+
+ kwargs_decoder["config"] = decoder_config
+
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
+ logger.warning(
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
+ )
+
+ decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
+
+ # instantiate config with corresponding kwargs
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
+ return cls(encoder=encoder, decoder=decoder, config=config)
+
+ @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+ past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import EncoderDecoderModel, BertTokenizer
+ >>> import torch
+
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
+ ... "google-bert/bert-base-uncased", "google-bert/bert-base-uncased"
+ ... ) # initialize Bert2Bert from pre-trained checkpoints
+
+ >>> # training
+ >>> model.config.decoder_start_token_id = tokenizer.cls_token_id
+ >>> model.config.pad_token_id = tokenizer.pad_token_id
+ >>> model.config.vocab_size = model.config.decoder.vocab_size
+
+ >>> input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids
+ >>> labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids
+ >>> outputs = model(input_ids=input_ids, labels=labels)
+ >>> loss, logits = outputs.loss, outputs.logits
+
+ >>> # save and load from pretrained
+ >>> model.save_pretrained("bert2bert")
+ >>> model = EncoderDecoderModel.from_pretrained("bert2bert")
+
+ >>> # generation
+ >>> generated = model.generate(input_ids)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **kwargs_encoder,
+ )
+ elif isinstance(encoder_outputs, tuple):
+ encoder_outputs = BaseModelOutput(*encoder_outputs)
+
+ encoder_hidden_states = encoder_outputs[0]
+
+ # optionally project encoder_hidden_states
+ if (
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
+ and self.decoder.config.cross_attention_hidden_size is None
+ ):
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
+
+ if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+ if decoder_attention_mask is None:
+ decoder_attention_mask = decoder_input_ids.new_tensor(decoder_input_ids != self.config.pad_token_id)
+
+ # Decode
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=attention_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ use_cache=use_cache,
+ past_key_values=past_key_values,
+ return_dict=return_dict,
+ **kwargs_decoder,
+ )
+
+ # Compute loss independently of the decoder (as some decoders shift the logits inside their forward)
+ loss = None
+ if labels is not None:
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
+ logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ if loss is not None:
+ return (loss,) + decoder_outputs + encoder_outputs
+ else:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqLMOutput(
+ loss=loss,
+ logits=decoder_outputs.logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
+ ):
+ decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
+ decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
+ input_dict = {
+ "attention_mask": attention_mask,
+ "decoder_attention_mask": decoder_attention_mask,
+ "decoder_input_ids": decoder_inputs["input_ids"],
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": decoder_inputs["past_key_values"],
+ "use_cache": use_cache,
+ }
+ return input_dict
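+
+ # Illustrative note on the method above: during `generate()`, `input_ids` here are the
+ # decoder ids produced so far; the encoder is run once and its `encoder_outputs` are
+ # threaded through unchanged, while the decoder's own `prepare_inputs_for_generation`
+ # decides how much of the sequence to feed when a cache (`past_key_values`) is present.
+ # The returned keys map onto the decoder_* arguments of `forward`.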
+
+ def resize_token_embeddings(self, *args, **kwargs):
+ raise NotImplementedError(
+ "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the"
+ " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
+ " model.decoder.resize_token_embeddings(...))"
+ )
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ # apply decoder cache reordering here
+ return self.decoder._reorder_cache(past_key_values, beam_idx)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..beecd080328e167a56e68b943c23822fa78d36b8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py
@@ -0,0 +1,899 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Classes to support Flax Encoder-Decoder architectures"""
+
+
+import os
+from typing import Optional, Tuple, Union
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+from jax.random import PRNGKey
+
+from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput
+from ...modeling_flax_utils import FlaxPreTrainedModel
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from ..auto.configuration_auto import AutoConfig
+from ..auto.modeling_flax_auto import FlaxAutoModel, FlaxAutoModelForCausalLM
+from .configuration_encoder_decoder import EncoderDecoderConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "EncoderDecoderConfig"
+
+ENCODER_DECODER_START_DOCSTRING = r"""
+ This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
+ encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
+ [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]
+ function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
+ generative task, like summarization.
+
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
+ Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
+ Zhou, Wei Li, Peter J. Liu.
+
+ After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models
+ (see the examples for more information).
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a Flax Linen
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
+
+ Parameters:
+ config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
+ENCODER_DECODER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be
+ created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`
+ and prepending them with the `decoder_start_token_id`.
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.encoder.max_position_embeddings - 1]`.
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.decoder.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ If set to `True`, the model will return a [`~utils.FlaxSeq2SeqLMOutput`] instead of a plain tuple.
+"""
+
+ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.encoder.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ If set to `True`, the model will return a [`~utils.FlaxBaseModelOutput`] instead of a plain tuple.
+"""
+
+ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = r"""
+ Args:
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be
+ created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`
+ and prepending them with the `decoder_start_token_id`.
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.decoder.max_position_embeddings - 1]`.
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ If set to `True`, the model will return a [`~utils.FlaxCausalLMOutputWithCrossAttentions`] instead of a
+ plain tuple.
+"""
+
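+# A minimal sketch (with illustrative token values) of how `decoder_input_ids` are typically built from `labels`
+# outside the model, as described in the docstring above: shift right, replace -100 with the pad token id, and
+# prepend the decoder start token id.
+#
+#     import numpy as np
+#
+#     labels = np.array([[5, 6, -100]])
+#     pad_token_id, decoder_start_token_id = 0, 2
+#     shifted = np.concatenate([[[decoder_start_token_id]], labels[:, :-1]], axis=-1)
+#     decoder_input_ids = np.where(shifted == -100, pad_token_id, shifted)  # -> [[2, 5, 6]]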
+
+class FlaxEncoderDecoderModule(nn.Module):
+ config: EncoderDecoderConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ encoder_config = self.config.encoder
+ decoder_config = self.config.decoder
+
+ # Copied from `modeling_hybrid_clip.py` with modifications.
+ from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING
+
+ encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class
+ decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class
+
+ self.encoder = encoder_module(encoder_config, dtype=self.dtype)
+ self.decoder = decoder_module(decoder_config, dtype=self.dtype)
+
+ # encoder outputs might need to be projected to different dimension for decoder
+ if (
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
+ and self.decoder.config.cross_attention_hidden_size is None
+ ):
+ self.enc_to_dec_proj = nn.Dense(
+ self.decoder.config.hidden_size,
+ kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range),
+ dtype=self.dtype,
+ )
+ else:
+ self.enc_to_dec_proj = None
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_projection_module(self):
+ return self.enc_to_dec_proj
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ encoder_hidden_states = encoder_outputs[0]
+
+ # optionally project encoder_hidden_states
+ if self.enc_to_dec_proj is not None:
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
+
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return FlaxSeq2SeqLMOutput(
+ logits=decoder_outputs.logits,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
+class FlaxEncoderDecoderModel(FlaxPreTrainedModel):
+ r"""
+    [`FlaxEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with
+    the module (flax.nn.Module) of one of the base model classes of the library as encoder module and another one as
+    decoder module when created with the [`~FlaxAutoModel.from_pretrained`] class method for the encoder and the
+    [`~FlaxAutoModelForCausalLM.from_pretrained`] class method for the decoder.
+ """
+
+ config_class = EncoderDecoderConfig
+ base_model_prefix = "encoder_decoder"
+ module_class = FlaxEncoderDecoderModule
+
+ def __init__(
+ self,
+ config: EncoderDecoderConfig,
+ input_shape: Optional[Tuple] = None,
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ if input_shape is None:
+ input_shape = ((1, 1), (1, 1))
+
+ if not _do_init:
+ raise ValueError(
+ "`FlaxEncoderDecoderModel` cannot be created without initializing, `_do_init` must be `True`."
+ )
+
+ if config.decoder.cross_attention_hidden_size is not None:
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
+ raise ValueError(
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
+ " `config.encoder.hidden_size`."
+ )
+
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ encoder_input_shape, decoder_input_shape = input_shape
+
+ # init input tensors
+ input_ids = jnp.zeros(encoder_input_shape, dtype="i4")
+ attention_mask = jnp.ones_like(input_ids)
+ decoder_input_ids = jnp.zeros(decoder_input_shape, dtype="i4")
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ decoder_batch_size, decoder_sequence_length = decoder_input_ids.shape
+ if not decoder_batch_size == batch_size:
+ raise ValueError(
+ f"The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder"
+ f" and {decoder_batch_size} for decoder."
+ )
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length)
+ )
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ )["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ def init_cache(self, batch_size, max_length, encoder_outputs):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+            encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
+                `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
+                `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*,
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
+ cross-attention of the decoder.
+ """
+ # init input variables to retrieve cache
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
+ )
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ position_ids=decoder_position_ids,
+ **kwargs,
+ )
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0),
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ init_cache=True,
+ method=_decoder_forward, # we only need to call the decoder to init the cache
+ )
+ return unfreeze(init_variables["cache"])
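+    # A minimal sketch of how `init_cache` is typically combined with `encode` and `decode` for step-by-step
+    # autoregressive decoding (names such as `cache` and the shapes below are illustrative only):
+    #
+    #     encoder_outputs = model.encode(input_ids)
+    #     cache = model.init_cache(batch_size, max_length, encoder_outputs)
+    #     position_ids = jnp.zeros((batch_size, 1), dtype="i4")
+    #     outputs = model.decode(
+    #         decoder_input_ids, encoder_outputs, past_key_values=cache, decoder_position_ids=position_ids
+    #     )
+    #     cache = outputs.past_key_values  # updated cache for the next step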
+
+ @add_start_docstrings(ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def encode(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer
+
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
+ >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
+
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> input_ids = tokenizer.encode(text, return_tensors="np")
+ >>> encoder_outputs = model.encode(input_ids)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+ if position_ids is None:
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
+ encode_module = module._get_encoder_module()
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
+
+ outputs = self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ method=_encoder_forward,
+ )
+
+ if return_dict:
+ outputs = FlaxBaseModelOutput(
+ last_hidden_state=outputs.last_hidden_state,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ return outputs
+
+ @add_start_docstrings(ENCODER_DECODER_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer
+ >>> import jax.numpy as jnp
+
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
+ >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
+
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> input_ids = tokenizer.encode(text, max_length=1024, return_tensors="np")
+ >>> encoder_outputs = model.encode(input_ids)
+
+ >>> decoder_start_token_id = model.config.decoder.bos_token_id
+ >>> decoder_input_ids = jnp.ones((input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ if decoder_position_ids is None:
+ if past_key_values is not None:
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
+
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+        # If past_key_values are passed, the cache is already initialized, and the private flag init_cache has to be
+        # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
+        # updated by the decoder's attention modules.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(
+ module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs
+ ):
+ projection_module = module._get_projection_module()
+ decoder_module = module._get_decoder_module()
+
+ # optionally project encoder_hidden_states
+ if projection_module is not None:
+ encoder_hidden_states = projection_module(encoder_hidden_states)
+
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ encoder_hidden_states=encoder_hidden_states,
+ **kwargs,
+ )
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past = outputs
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past = outputs
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+ @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def __call__(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ decoder_input_ids: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import FlaxEncoderDecoderModel, BertTokenizer, GPT2Tokenizer
+
+ >>> # load a fine-tuned bert2gpt2 model
+ >>> model = FlaxEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16")
+ >>> # load input & output tokenizer
+ >>> tokenizer_input = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
+ >>> tokenizer_output = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
+
+ >>> article = '''Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members
+ >>> singing a racist chant. SAE's national chapter suspended the students,
+ >>> but University of Oklahoma President David Boren took it a step further,
+ >>> saying the university's affiliation with the fraternity is permanently done.'''
+
+ >>> input_ids = tokenizer_input(article, add_special_tokens=True, return_tensors="np").input_ids
+
+ >>> # use GPT2's eos_token as the pad as well as eos token
+ >>> model.config.eos_token_id = model.config.decoder.eos_token_id
+ >>> model.config.pad_token_id = model.config.eos_token_id
+
+ >>> sequences = model.generate(input_ids, num_beams=4, max_length=12).sequences
+
+ >>> summary = tokenizer_output.batch_decode(sequences, skip_special_tokens=True)[0]
+ >>> assert summary == "SAS Alpha Epsilon suspended Sigma Alpha Epsilon members"
+ ```
+ """
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # prepare encoder inputs
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+ if position_ids is None:
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ # prepare decoder inputs
+ if decoder_input_ids is None:
+            raise ValueError(
+                "`decoder_input_ids` cannot be `None`. For sequence to sequence training, `decoder_input_ids` must"
+                " be specified as an input argument."
+            )
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+ if decoder_position_ids is None:
+ batch_size, sequence_length = decoder_input_ids.shape
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ max_length,
+ attention_mask: Optional[jax.Array] = None,
+ decoder_attention_mask: Optional[jax.Array] = None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # initializing the cache
+ batch_size, seq_length = decoder_input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
+        # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < max_length.
+        # But since the decoder uses a causal mask, those positions are masked anyway.
+        # Thus we can create a single static attention_mask here, which is more efficient for compilation.
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if decoder_attention_mask is not None:
+ decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
+ else:
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)
+ )
+
+ return {
+ "past_key_values": past_key_values,
+ "encoder_outputs": encoder_outputs,
+ "encoder_attention_mask": attention_mask,
+ "decoder_attention_mask": extended_attention_mask,
+ "decoder_position_ids": decoder_position_ids,
+ }
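+    # Worked example for the position ids above: with `decoder_attention_mask` [[1, 1, 1, 0]], `cumsum(-1) - 1`
+    # yields [[0, 1, 2, 2]], so the three real tokens get positions 0..2 while the padded slot repeats the last
+    # valid position instead of advancing.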
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
+ return model_kwargs
+
+ @classmethod
+ def from_encoder_decoder_pretrained(
+ cls,
+ encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
+ decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
+ *model_args,
+ **kwargs,
+ ) -> FlaxPreTrainedModel:
+ r"""
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
+ checkpoints.
+
+ Params:
+ encoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*):
+ Information necessary to initiate the encoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+
+ decoder_pretrained_model_name_or_path (`Union[str, os.PathLike]`, *optional*, defaults to `None`):
+ Information necessary to initiate the decoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+
+ model_args (remaining positional arguments, *optional*):
+                All remaining positional arguments will be passed to the underlying model's `__init__` method.
+
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
+ `output_attentions=True`).
+
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+ Example:
+
+ ```python
+ >>> from transformers import FlaxEncoderDecoderModel
+
+ >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
+ >>> model = FlaxEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
+ >>> # saving model after fine-tuning
+ >>> model.save_pretrained("./bert2gpt2")
+ >>> # load fine-tuned model
+ >>> model = FlaxEncoderDecoderModel.from_pretrained("./bert2gpt2")
+ ```"""
+
+ kwargs_encoder = {
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
+ }
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ # remove encoder, decoder kwargs from kwargs
+ for key in kwargs_encoder.keys():
+ del kwargs["encoder_" + key]
+ for key in kwargs_decoder.keys():
+ del kwargs["decoder_" + key]
+
+ # Load and initialize the encoder and decoder
+ # The distinction between encoder and decoder at the model level is made
+ # by the value of the flag `is_decoder` that we need to set correctly.
+ encoder = kwargs_encoder.pop("model", None)
+ if encoder is None:
+ if encoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+                    "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_encoder:
+ encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
+ encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
+ )
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
+ logger.info(
+                        f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model "
+                        "from a decoder model. Cross-attention and causal mask are disabled."
+ )
+ encoder_config.is_decoder = False
+ encoder_config.add_cross_attention = False
+
+ kwargs_encoder["config"] = encoder_config
+
+ encoder = FlaxAutoModel.from_pretrained(
+ encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
+ )
+
+ decoder = kwargs_decoder.pop("model", None)
+ if decoder is None:
+ if decoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_decoder:
+ decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
+ decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
+ )
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
+ logger.info(
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
+ )
+ decoder_config.is_decoder = True
+ decoder_config.add_cross_attention = True
+
+ kwargs_decoder["config"] = decoder_config
+
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
+ logger.warning(
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
+ )
+
+ decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
+
+ # instantiate config with corresponding kwargs
+ dtype = kwargs.pop("dtype", jnp.float32)
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
+
+ # init model
+ model = cls(config, dtype=dtype)
+ model.params["encoder"] = encoder.params
+ model.params["decoder"] = decoder.params
+
+ return model
diff --git a/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..855fb767d13d73173365034f485b43174ed583d9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py
@@ -0,0 +1,663 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Classes to support TF Encoder-Decoder architectures"""
+
+
+from __future__ import annotations
+
+import inspect
+import re
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...configuration_utils import PretrainedConfig
+from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ get_initializer,
+ keras,
+ unpack_inputs,
+)
+from ...tf_utils import shape_list
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ..auto.configuration_auto import AutoConfig
+from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM
+from .configuration_encoder_decoder import EncoderDecoderConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "EncoderDecoderConfig"
+
+DEPRECATION_WARNING = (
+ "Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the"
+ " encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if"
+    " fine-tuning a model trained with versions prior to 4.17.0. The decoder_input_ids are now created based on the"
+    " labels; there is no need to pass them yourself anymore."
+)
+
+ENCODER_DECODER_START_DOCSTRING = r"""
+ This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the
+ encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via
+ [`~TFAutoModel.from_pretrained`] function and the decoder is loaded via [`~TFAutoModelForCausalLM.from_pretrained`]
+ function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream
+ generative task, like summarization.
+
+ The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
+ tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
+    Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.
+
+    After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other model
+ (see the examples for more information).
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+    etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ENCODER_DECODER_INPUTS_DOCSTRING = r"""
+ Args:
+        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ Provide for sequence to sequence training to the decoder. Indices can be obtained using
+ [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
+ details.
+ decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+        encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*):
+ This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output
+ of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `({0})`.
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
+ into associated vectors than the model's internal embedding lookup matrix.
+ labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+            Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
+            ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+            If set to `True`, the model will return a [`~modeling_tf_outputs.TFSeq2SeqLMOutput`] instead of a plain tuple.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
+
+ - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
+        - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
+"""
+
+
+def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ if pad_token_id is None:
+ raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
+
+ if decoder_start_token_id is None:
+ raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
+
+ start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids = tf.where(
+ shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids
+ )
+
+ # "Verify that `labels` has only positive values and -100"
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
+
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
+ with tf.control_dependencies([assert_gte0]):
+ shifted_input_ids = tf.identity(shifted_input_ids)
+
+ return shifted_input_ids
+
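+# Illustrative example of `shift_tokens_right` (token values chosen only to show the -100 replacement): with
+# labels [[5, -100, -100]], pad_token_id=0 and decoder_start_token_id=2, the last label is dropped, the start
+# token is prepended, and the remaining -100 is replaced by the pad id, giving [[2, 5, 0]].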
+
+@add_start_docstrings(ENCODER_DECODER_START_DOCSTRING)
+class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss):
+ r"""
+ [`TFEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
+ of the base model classes of the library as encoder and another one as decoder when created with the
+ [`~TFAutoModel.from_pretrained`] class method for the encoder and [`~TFAutoModelForCausalLM.from_pretrained`] class
+ method for the decoder.
+ """
+
+ config_class = EncoderDecoderConfig
+ base_model_prefix = "encoder_decoder"
+ load_weight_prefix = "tf_encoder_decoder_model"
+
+ def __init__(
+ self,
+ config: Optional[PretrainedConfig] = None,
+ encoder: Optional[TFPreTrainedModel] = None,
+ decoder: Optional[TFPreTrainedModel] = None,
+ ):
+ if config is None and (encoder is None or decoder is None):
+ raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
+ if config is None:
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
+ else:
+ if not isinstance(config, self.config_class):
+ raise ValueError(f"config: {config} has to be of type {self.config_class}")
+
+ if config.decoder.cross_attention_hidden_size is not None:
+ if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
+ raise ValueError(
+ "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
+ f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
+ f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
+ " `config.encoder.hidden_size`."
+ )
+
+ # initialize with config
+ super().__init__(config)
+
+ if encoder is None:
+ encoder = TFAutoModel.from_config(config.encoder, name="encoder")
+
+ if decoder is None:
+ decoder = TFAutoModelForCausalLM.from_config(config.decoder, name="decoder")
+
+ self.encoder = encoder
+ self.decoder = decoder
+
+ if self.encoder.config.to_dict() != self.config.encoder.to_dict():
+ logger.warning(
+ f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
+ f" {self.config.encoder}"
+ )
+ if self.decoder.config.to_dict() != self.config.decoder.to_dict():
+ logger.warning(
+ f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
+ f" {self.config.decoder}"
+ )
+
+ # make sure that the individual model's config refers to the shared config
+ # so that the updates to the config will be synced
+ self.encoder.config = self.config.encoder
+ self.decoder.config = self.config.decoder
+
+ # encoder outputs might need to be projected to different dimension for decoder
+ if (
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
+ and self.decoder.config.cross_attention_hidden_size is None
+ ):
+ self.enc_to_dec_proj = keras.layers.Dense(
+ units=self.decoder.config.hidden_size,
+ kernel_initializer=get_initializer(config.encoder.initializer_range),
+ name="enc_to_dec_proj",
+ )
+
+ if self.encoder.get_output_embeddings() is not None:
+ raise ValueError(
+                f"The encoder {self.encoder} should not have an LM head. Please use a model without an LM head."
+ )
+
+ decoder_signature = set(inspect.signature(self.decoder.call).parameters.keys())
+ if "encoder_hidden_states" not in decoder_signature:
+ raise ValueError(
+ "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
+ "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
+ )
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def get_input_embeddings(self):
+ return self.encoder.get_input_embeddings()
+
+ def get_output_embeddings(self):
+ return self.decoder.get_output_embeddings()
+
+ def set_output_embeddings(self, new_embeddings):
+ return self.decoder.set_output_embeddings(new_embeddings)
+
+ def tf_to_pt_weight_rename(self, tf_weight):
+ # Matt: The TF and PT weights don't align because our TF base classes have an extra layer compared to PT models
+ # (the main model stem is in the MainLayer class). If we remove that layer, then weight names sync up as normal.
+ # However, the name of that extra layer is the name of the MainLayer in the base model. We make the assumption
+ # here that the config model_type is the same as the name of the MainLayer. I don't know of anywhere that's
+ # not the case, and I wasn't sure how else to go from the config to the correct MainLayer name!
+
+ # This override is only needed in the case where we're crossloading weights from PT. However, since weights are
+ # often safetensors now, we don't know if we're going to be crossloading until we sniff the weights file.
+ # Therefore, we specify tf_to_pt_weight_rename anyway, and let the super method figure out if it needs it
+ # or not.
+ encoder_model_type = self.config.encoder.model_type
+ if "encoder" in tf_weight and "decoder" not in tf_weight:
+ return (re.sub(rf"encoder\.{encoder_model_type}\.", "encoder.", tf_weight),)
+ else:
+ return (tf_weight,)
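+    # For example, assuming the encoder's `model_type` is "bert", a TF weight named
+    # "encoder.bert.embeddings.word_embeddings.weight" would be renamed to
+    # "encoder.embeddings.word_embeddings.weight" before matching against the PyTorch checkpoint, while weights
+    # whose names contain "decoder" are returned unchanged.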
+
+ @classmethod
+ def from_encoder_decoder_pretrained(
+ cls,
+ encoder_pretrained_model_name_or_path: str = None,
+ decoder_pretrained_model_name_or_path: str = None,
+ *model_args,
+ **kwargs,
+ ) -> TFPreTrainedModel:
+ r"""
+ Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
+ checkpoints.
+
+
+ Params:
+ encoder_pretrained_model_name_or_path (`str`, *optional*):
+ Information necessary to initiate the encoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case,
+ `encoder_from_pt` should be set to `True`.
+
+ decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
+ Information necessary to initiate the decoder. Can be either:
+
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
+ - A path to a *directory* containing model weights saved using
+ [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
+ - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case,
+ `decoder_from_pt` should be set to `True`.
+
+ model_args (remaining positional arguments, *optional*):
+                All remaining positional arguments will be passed to the underlying model's `__init__` method.
+
+ kwargs (remaining dictionary of keyword arguments, *optional*):
+                Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
+ `output_attentions=True`).
+
+ - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
+ - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
+ - To update the parent model configuration, do not use a prefix for each configuration parameter.
+
+ Behaves differently depending on whether a `config` is provided or automatically loaded.
+
+ Example:
+
+ ```python
+ >>> from transformers import TFEncoderDecoderModel
+
+        >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
+ >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "openai-community/gpt2")
+ >>> # saving model after fine-tuning
+ >>> model.save_pretrained("./bert2gpt2")
+ >>> # load fine-tuned model
+ >>> model = TFEncoderDecoderModel.from_pretrained("./bert2gpt2")
+ ```"""
+
+ kwargs_encoder = {
+ argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
+ }
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ # remove encoder, decoder kwargs from kwargs
+ for key in kwargs_encoder.keys():
+ del kwargs["encoder_" + key]
+ for key in kwargs_decoder.keys():
+ del kwargs["decoder_" + key]
+
+ # Load and initialize the encoder and decoder
+ # The distinction between encoder and decoder at the model level is made
+ # by the value of the flag `is_decoder` that we need to set correctly.
+ encoder = kwargs_encoder.pop("model", None)
+ if encoder is None:
+ if encoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+                    "If `encoder_model` is not defined as an argument, an `encoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_encoder:
+ encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path)
+ if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
+ logger.info(
+                        f"Initializing {encoder_pretrained_model_name_or_path} as an encoder model "
+                        "from a decoder model. Cross-attention and causal mask are disabled."
+ )
+ encoder_config.is_decoder = False
+ encoder_config.add_cross_attention = False
+
+ kwargs_encoder["config"] = encoder_config
+
+ kwargs_encoder["name"] = "encoder"
+ kwargs_encoder["load_weight_prefix"] = cls.load_weight_prefix
+ encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
+
+ decoder = kwargs_decoder.pop("model", None)
+ if decoder is None:
+ if decoder_pretrained_model_name_or_path is None:
+ raise ValueError(
+ "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
+ "to be defined."
+ )
+
+ if "config" not in kwargs_decoder:
+ decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path)
+ if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
+ logger.info(
+ f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
+ f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
+ f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
+ )
+ decoder_config.is_decoder = True
+ decoder_config.add_cross_attention = True
+
+ kwargs_decoder["config"] = decoder_config
+
+ if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
+ logger.warning(
+ f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
+ f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
+ "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
+ "passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
+ "`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
+ )
+
+ kwargs_decoder["name"] = "decoder"
+ kwargs_decoder["load_weight_prefix"] = cls.load_weight_prefix
+ decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
+
+ # Make sure these 2 `keras.Model` have fixed names so `from_pretrained` could load model weights correctly.
+ if encoder.name != "encoder":
+ raise ValueError("encoder model must be created with the name `encoder`.")
+ if decoder.name != "decoder":
+ raise ValueError("decoder model must be created with the name `decoder`.")
+
+ # instantiate config with corresponding kwargs
+ config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
+ return cls(encoder=encoder, decoder=decoder, config=config)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ **kwargs,
+ ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import TFEncoderDecoderModel, BertTokenizer
+
+        >>> # initialize a bert2gpt2 from pretrained BERT and GPT2 models. Note that the cross-attention layers will be randomly initialized
+ >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-cased", "openai-community/gpt2")
+
+ >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
+
+ >>> # forward
+ >>> input_ids = tokenizer.encode(
+ ... "Hello, my dog is cute", add_special_tokens=True, return_tensors="tf"
+ ... ) # Batch size 1
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
+
+ >>> # training
+ >>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=input_ids)
+ >>> loss, logits = outputs.loss, outputs.logits
+
+ >>> # save and load from pretrained
+ >>> model.save_pretrained("bert2gpt2")
+ >>> model = TFEncoderDecoderModel.from_pretrained("bert2gpt2")
+
+ >>> # generation
+ >>> generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.bos_token_id)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
+
+ kwargs_decoder = {
+ argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
+ }
+
+ # Let the user be responsible for the expected format.
+ if encoder_outputs is not None:
+ if return_dict and not isinstance(encoder_outputs, ModelOutput):
+ raise ValueError(
+ "If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of "
+ f"`ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`."
+ )
+
+ if encoder_outputs is None:
+ encoder_inputs = {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "inputs_embeds": inputs_embeds,
+ "output_attentions": output_attentions,
+ "output_hidden_states": output_hidden_states,
+ "return_dict": return_dict,
+ "training": training,
+ }
+
+ # Add arguments to encoder from `kwargs_encoder`
+ encoder_inputs.update(kwargs_encoder)
+
+ # Handle the case where the inputs are passed as a single dict which contains `labels`.
+            # The `labels` shouldn't be passed to `self.encoder` below, because it is a base model without this
+ # parameter (otherwise, an error occurs when `input_processing` is called inside `self.encoder.call()`).
+ if "labels" in encoder_inputs:
+ labels = encoder_inputs.pop("labels")
+
+ # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
+ if "decoder_input_ids" in encoder_inputs:
+ decoder_input_ids = encoder_inputs.pop("decoder_input_ids")
+ # handle the init case where `dummy_inputs` returns a dict containing `decoder_input_ids`.
+ if "decoder_attention_mask" in encoder_inputs:
+ decoder_attention_mask = encoder_inputs.pop("decoder_attention_mask")
+
+ encoder_outputs = self.encoder(**encoder_inputs)
+
+ encoder_hidden_states = encoder_outputs[0]
+
+ # optionally project encoder_hidden_states
+ if (
+ self.encoder.config.hidden_size != self.decoder.config.hidden_size
+ and self.decoder.config.cross_attention_hidden_size is None
+ ):
+ encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
+
+ if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ decoder_inputs = {
+ "input_ids": decoder_input_ids,
+ "attention_mask": decoder_attention_mask,
+ "encoder_hidden_states": encoder_hidden_states,
+ "encoder_attention_mask": attention_mask,
+ "inputs_embeds": decoder_inputs_embeds,
+ "output_attentions": output_attentions,
+ "output_hidden_states": output_hidden_states,
+ "use_cache": use_cache,
+ "past_key_values": past_key_values,
+ "return_dict": return_dict,
+ "training": training,
+ }
+
+ # Add arguments to decoder from `kwargs_decoder`
+ decoder_inputs.update(kwargs_decoder)
+
+ decoder_outputs = self.decoder(**decoder_inputs)
+
+ logits = decoder_outputs[0]
+
+ # Compute loss independent from decoder (as some shift the logits inside them)
+ loss = None
+ if labels is not None:
+ warnings.warn(DEPRECATION_WARNING, FutureWarning)
+ loss = self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ past_key_values = None
+ if use_cache:
+ past_key_values = decoder_outputs[1]
+ # The starting index of the remaining elements in `decoder_outputs`
+ start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)])
+
+ if not isinstance(encoder_outputs, tuple):
+ encoder_outputs = encoder_outputs.to_tuple()
+ output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs
+ output = tuple([x for x in output if x is not None])
+ return output
+
+ return TFSeq2SeqLMOutput(
+ loss=loss,
+ logits=decoder_outputs.logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
+ ):
+ decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values)
+ decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
+ past_key_values = decoder_inputs.get("past_key_values")
+ if past_key_values is None:
+ past_key_values = decoder_inputs.get("past") # e.g. on TF GPT2
+ input_dict = {
+ "input_ids": None, # needs to be passed to make Keras.layer.__call__ happy
+ "attention_mask": attention_mask,
+ "decoder_attention_mask": decoder_attention_mask,
+ "decoder_input_ids": decoder_inputs["input_ids"],
+ # TODO (joao): the `TFBaseModelOutput` wrapper should not be needed after the generate refactor is complete
+ "encoder_outputs": TFBaseModelOutput(last_hidden_state=encoder_outputs[0]),
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+ return input_dict
+
+ def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
+ return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
+
+ def resize_token_embeddings(self, *args, **kwargs):
+ raise NotImplementedError(
+ "Resizing the embedding layers via the TFEncoderDecoderModel directly is not supported.Please use the"
+ " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
+ " model.decoder.resize_token_embeddings(...))"
+ )
+
+ def _reorder_cache(self, past, beam_idx):
+ # apply decoder cache reordering here
+ return self.decoder._reorder_cache(past, beam_idx)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "enc_to_dec_proj", None) is not None:
+ with tf.name_scope(self.enc_to_dec_proj.name):
+ self.enc_to_dec_proj.build([None, None, self.encoder.config.hidden_size])
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/ibert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/ibert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..637eb08eaf412d136e2e8ccf7a1d7d92147d364f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/ibert/__init__.py
@@ -0,0 +1,62 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_ibert"] = [
+ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "IBertForMaskedLM",
+ "IBertForMultipleChoice",
+ "IBertForQuestionAnswering",
+ "IBertForSequenceClassification",
+ "IBertForTokenClassification",
+ "IBertModel",
+ "IBertPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_ibert import (
+ IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ IBertForMaskedLM,
+ IBertForMultipleChoice,
+ IBertForQuestionAnswering,
+ IBertForSequenceClassification,
+ IBertForTokenClassification,
+ IBertModel,
+ IBertPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..78d0442dd41d516e72589fa1d88613dc836e2b05
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/configuration_ibert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/configuration_ibert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c06a4f9542878f28e76aac39920de4bcc17fa364
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/configuration_ibert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/modeling_ibert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/modeling_ibert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..252824bb7e0f045ea1f108092d2f5f561f149a16
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/modeling_ibert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/quant_modules.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/quant_modules.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36dad3e7bac5532e285e063ba90bd041ee3bbb75
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/ibert/__pycache__/quant_modules.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/ibert/configuration_ibert.py b/venv/lib/python3.10/site-packages/transformers/models/ibert/configuration_ibert.py
new file mode 100644
index 0000000000000000000000000000000000000000..94e040d417ef8dc81df1b97d736f43ebc098309e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/ibert/configuration_ibert.py
@@ -0,0 +1,141 @@
+# coding=utf-8
+# Copyright 2021 The I-BERT Authors (Sehoon Kim, Amir Gholami, Zhewei Yao,
+# Michael Mahoney, Kurt Keutzer - UC Berkeley) and The HuggingFace Inc. team.
+# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" I-BERT configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class IBertConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`IBertModel`]. It is used to instantiate an I-BERT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the I-BERT
+ [kssteven/ibert-roberta-base](https://huggingface.co/kssteven/ibert-roberta-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the I-BERT model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`IBertModel`]
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`IBertModel`]
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ quant_mode (`bool`, *optional*, defaults to `False`):
+ Whether to quantize the model or not.
+ force_dequant (`str`, *optional*, defaults to `"none"`):
+ Force dequantize specific nonlinear layers. Dequantized layers are then executed with full precision.
+ `"none"`, `"gelu"`, `"softmax"`, `"layernorm"` and `"nonlinear"` are supported. By default, it is set to
+ `"none"`, which does not dequantize any layers. Please specify `"gelu"`, `"softmax"`, or `"layernorm"` to
+ dequantize GELU, Softmax, or LayerNorm, respectively. `"nonlinear"` will dequantize all nonlinear layers,
+ i.e., GELU, Softmax, and LayerNorm.
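+
+ Example (a minimal usage sketch, assuming the default configuration values; only classes defined in this
+ package are used):
+
+ ```python
+ >>> from transformers import IBertConfig, IBertModel
+
+ >>> # Initializing an I-BERT configuration with the default (ibert-roberta-base style) values
+ >>> configuration = IBertConfig()
+
+ >>> # Initializing a model (with random weights) from that configuration
+ >>> model = IBertModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```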
+ """
+
+ model_type = "ibert"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ position_embedding_type="absolute",
+ quant_mode=False,
+ force_dequant="none",
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.quant_mode = quant_mode
+ self.force_dequant = force_dequant
+
+
+class IBertOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ]
+ )
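+
+
+# A minimal, commented-out sketch of inspecting the dynamic axes declared above. It assumes the default
+# "default" task, so the sequence inputs use the {0: "batch", 1: "sequence"} axes.
+#
+# from transformers import IBertConfig
+# onnx_config = IBertOnnxConfig(IBertConfig())
+# print(onnx_config.inputs)
+# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])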
diff --git a/venv/lib/python3.10/site-packages/transformers/models/ibert/modeling_ibert.py b/venv/lib/python3.10/site-packages/transformers/models/ibert/modeling_ibert.py
new file mode 100644
index 0000000000000000000000000000000000000000..54c37f507e3a63983c87ebb408327a5650431934
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/ibert/modeling_ibert.py
@@ -0,0 +1,1353 @@
+# coding=utf-8
+# Copyright 2021 The I-BERT Authors (Sehoon Kim, Amir Gholami, Zhewei Yao,
+# Michael Mahoney, Kurt Keutzer - UC Berkeley) and The HuggingFace Inc. team.
+# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""PyTorch I-BERT model."""
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import gelu
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_ibert import IBertConfig
+from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "kssteven/ibert-roberta-base"
+_CONFIG_FOR_DOC = "IBertConfig"
+
+
+from ..deprecated._archive_maps import IBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class IBertEmbeddings(nn.Module):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.quant_mode = config.quant_mode
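+ # Bit widths for the integer-only (quant_mode) path: 8-bit embedding weights and output activations,
+ # 16-bit accumulators for the embedding additions, and 22-bit/32-bit LayerNorm input/output.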
+ self.embedding_bit = 8
+ self.embedding_act_bit = 16
+ self.act_bit = 8
+ self.ln_input_bit = 22
+ self.ln_output_bit = 32
+
+ self.word_embeddings = QuantEmbedding(
+ config.vocab_size,
+ config.hidden_size,
+ padding_idx=config.pad_token_id,
+ weight_bit=self.embedding_bit,
+ quant_mode=self.quant_mode,
+ )
+ self.token_type_embeddings = QuantEmbedding(
+ config.type_vocab_size, config.hidden_size, weight_bit=self.embedding_bit, quant_mode=self.quant_mode
+ )
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+
+ # End copy
+ self.padding_idx = config.pad_token_id
+ self.position_embeddings = QuantEmbedding(
+ config.max_position_embeddings,
+ config.hidden_size,
+ padding_idx=self.padding_idx,
+ weight_bit=self.embedding_bit,
+ quant_mode=self.quant_mode,
+ )
+
+ # Integer-only addition between embeddings
+ self.embeddings_act1 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode)
+ self.embeddings_act2 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = IntLayerNorm(
+ config.hidden_size,
+ eps=config.layer_norm_eps,
+ output_bit=self.ln_output_bit,
+ quant_mode=self.quant_mode,
+ force_dequant=config.force_dequant,
+ )
+ self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
+ ):
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(
+ input_ids, self.padding_idx, past_key_values_length
+ ).to(input_ids.device)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds, inputs_embeds_scaling_factor = self.word_embeddings(input_ids)
+ else:
+ inputs_embeds_scaling_factor = None
+ token_type_embeddings, token_type_embeddings_scaling_factor = self.token_type_embeddings(token_type_ids)
+
+ embeddings, embeddings_scaling_factor = self.embeddings_act1(
+ inputs_embeds,
+ inputs_embeds_scaling_factor,
+ identity=token_type_embeddings,
+ identity_scaling_factor=token_type_embeddings_scaling_factor,
+ )
+
+ if self.position_embedding_type == "absolute":
+ position_embeddings, position_embeddings_scaling_factor = self.position_embeddings(position_ids)
+ embeddings, embeddings_scaling_factor = self.embeddings_act1(
+ embeddings,
+ embeddings_scaling_factor,
+ identity=position_embeddings,
+ identity_scaling_factor=position_embeddings_scaling_factor,
+ )
+
+ embeddings, embeddings_scaling_factor = self.LayerNorm(embeddings, embeddings_scaling_factor)
+ embeddings = self.dropout(embeddings)
+ embeddings, embeddings_scaling_factor = self.output_activation(embeddings, embeddings_scaling_factor)
+ return embeddings, embeddings_scaling_factor
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
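+ For example, with `padding_idx = 1` and a sequence length of 3, the generated position ids are `[2, 3, 4]`.
+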
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
+
+class IBertSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+ self.quant_mode = config.quant_mode
+ self.weight_bit = 8
+ self.bias_bit = 32
+ self.act_bit = 8
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ # Q, K, V Linear layers
+ self.query = QuantLinear(
+ config.hidden_size,
+ self.all_head_size,
+ bias=True,
+ weight_bit=self.weight_bit,
+ bias_bit=self.bias_bit,
+ quant_mode=self.quant_mode,
+ per_channel=True,
+ )
+ self.key = QuantLinear(
+ config.hidden_size,
+ self.all_head_size,
+ bias=True,
+ weight_bit=self.weight_bit,
+ bias_bit=self.bias_bit,
+ quant_mode=self.quant_mode,
+ per_channel=True,
+ )
+ self.value = QuantLinear(
+ config.hidden_size,
+ self.all_head_size,
+ bias=True,
+ weight_bit=self.weight_bit,
+ bias_bit=self.bias_bit,
+ quant_mode=self.quant_mode,
+ per_channel=True,
+ )
+
+ # Requantization (32-bit -> 8-bit) for Q, K, V activations
+ self.query_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+ self.key_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+ self.value_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+ self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ if self.position_embedding_type != "absolute":
+ raise ValueError("I-BERT only supports 'absolute' for `config.position_embedding_type`")
+
+ self.softmax = IntSoftmax(self.act_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant)
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states,
+ hidden_states_scaling_factor,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ # Projection
+ mixed_query_layer, mixed_query_layer_scaling_factor = self.query(hidden_states, hidden_states_scaling_factor)
+ mixed_key_layer, mixed_key_layer_scaling_factor = self.key(hidden_states, hidden_states_scaling_factor)
+ mixed_value_layer, mixed_value_layer_scaling_factor = self.value(hidden_states, hidden_states_scaling_factor)
+
+ # Requantization
+ query_layer, query_layer_scaling_factor = self.query_activation(
+ mixed_query_layer, mixed_query_layer_scaling_factor
+ )
+ key_layer, key_layer_scaling_factor = self.key_activation(mixed_key_layer, mixed_key_layer_scaling_factor)
+ value_layer, value_layer_scaling_factor = self.value_activation(
+ mixed_value_layer, mixed_value_layer_scaling_factor
+ )
+
+ # Transpose
+ query_layer = self.transpose_for_scores(query_layer)
+ key_layer = self.transpose_for_scores(key_layer)
+ value_layer = self.transpose_for_scores(value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ scale = math.sqrt(self.attention_head_size)
+ attention_scores = attention_scores / scale
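+ # In quant_mode, integer tensors carry explicit scaling factors, so the score scaling factor is the
+ # product of the query and key scaling factors divided by the same sqrt(head_size) scale.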
+ if self.quant_mode:
+ attention_scores_scaling_factor = query_layer_scaling_factor * key_layer_scaling_factor / scale
+ else:
+ attention_scores_scaling_factor = None
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in IBertModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs, attention_probs_scaling_factor = self.softmax(
+ attention_scores, attention_scores_scaling_factor
+ )
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+ if attention_probs_scaling_factor is not None:
+ context_layer_scaling_factor = attention_probs_scaling_factor * value_layer_scaling_factor
+ else:
+ context_layer_scaling_factor = None
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ # Requantization: 32-bit -> 8-bit
+ context_layer, context_layer_scaling_factor = self.output_activation(
+ context_layer, context_layer_scaling_factor
+ )
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+ output_scaling_factor = (
+ (context_layer_scaling_factor, attention_probs_scaling_factor)
+ if output_attentions
+ else (context_layer_scaling_factor,)
+ )
+
+ return outputs, output_scaling_factor
+
+
+class IBertSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.quant_mode = config.quant_mode
+ self.act_bit = 8
+ self.weight_bit = 8
+ self.bias_bit = 32
+ self.ln_input_bit = 22
+ self.ln_output_bit = 32
+
+ self.dense = QuantLinear(
+ config.hidden_size,
+ config.hidden_size,
+ bias=True,
+ weight_bit=self.weight_bit,
+ bias_bit=self.bias_bit,
+ quant_mode=self.quant_mode,
+ per_channel=True,
+ )
+ self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode)
+ self.LayerNorm = IntLayerNorm(
+ config.hidden_size,
+ eps=config.layer_norm_eps,
+ output_bit=self.ln_output_bit,
+ quant_mode=self.quant_mode,
+ force_dequant=config.force_dequant,
+ )
+ self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor):
+ hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states, hidden_states_scaling_factor = self.ln_input_act(
+ hidden_states,
+ hidden_states_scaling_factor,
+ identity=input_tensor,
+ identity_scaling_factor=input_tensor_scaling_factor,
+ )
+ hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor)
+
+ hidden_states, hidden_states_scaling_factor = self.output_activation(
+ hidden_states, hidden_states_scaling_factor
+ )
+ return hidden_states, hidden_states_scaling_factor
+
+
+class IBertAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.quant_mode = config.quant_mode
+ self.self = IBertSelfAttention(config)
+ self.output = IBertSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states,
+ hidden_states_scaling_factor,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ self_outputs, self_outputs_scaling_factor = self.self(
+ hidden_states,
+ hidden_states_scaling_factor,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ )
+ attention_output, attention_output_scaling_factor = self.output(
+ self_outputs[0], self_outputs_scaling_factor[0], hidden_states, hidden_states_scaling_factor
+ )
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ outputs_scaling_factor = (attention_output_scaling_factor,) + self_outputs_scaling_factor[1:]
+ return outputs, outputs_scaling_factor
+
+
+class IBertIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.quant_mode = config.quant_mode
+ self.act_bit = 8
+ self.weight_bit = 8
+ self.bias_bit = 32
+ self.dense = QuantLinear(
+ config.hidden_size,
+ config.intermediate_size,
+ bias=True,
+ weight_bit=self.weight_bit,
+ bias_bit=self.bias_bit,
+ quant_mode=self.quant_mode,
+ per_channel=True,
+ )
+ if config.hidden_act != "gelu":
+ raise ValueError("I-BERT only supports 'gelu' for `config.hidden_act`")
+ self.intermediate_act_fn = IntGELU(quant_mode=self.quant_mode, force_dequant=config.force_dequant)
+ self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+
+ def forward(self, hidden_states, hidden_states_scaling_factor):
+ hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor)
+ hidden_states, hidden_states_scaling_factor = self.intermediate_act_fn(
+ hidden_states, hidden_states_scaling_factor
+ )
+
+ # Requantization: 32-bit -> 8-bit
+ hidden_states, hidden_states_scaling_factor = self.output_activation(
+ hidden_states, hidden_states_scaling_factor
+ )
+ return hidden_states, hidden_states_scaling_factor
+
+
+class IBertOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.quant_mode = config.quant_mode
+ self.act_bit = 8
+ self.weight_bit = 8
+ self.bias_bit = 32
+ self.ln_input_bit = 22
+ self.ln_output_bit = 32
+
+ self.dense = QuantLinear(
+ config.intermediate_size,
+ config.hidden_size,
+ bias=True,
+ weight_bit=self.weight_bit,
+ bias_bit=self.bias_bit,
+ quant_mode=self.quant_mode,
+ per_channel=True,
+ )
+ self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode)
+ self.LayerNorm = IntLayerNorm(
+ config.hidden_size,
+ eps=config.layer_norm_eps,
+ output_bit=self.ln_output_bit,
+ quant_mode=self.quant_mode,
+ force_dequant=config.force_dequant,
+ )
+ self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor):
+ hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states, hidden_states_scaling_factor = self.ln_input_act(
+ hidden_states,
+ hidden_states_scaling_factor,
+ identity=input_tensor,
+ identity_scaling_factor=input_tensor_scaling_factor,
+ )
+ hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor)
+
+ hidden_states, hidden_states_scaling_factor = self.output_activation(
+ hidden_states, hidden_states_scaling_factor
+ )
+ return hidden_states, hidden_states_scaling_factor
+
+
+class IBertLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.quant_mode = config.quant_mode
+ self.act_bit = 8
+
+ self.seq_len_dim = 1
+ self.attention = IBertAttention(config)
+ self.intermediate = IBertIntermediate(config)
+ self.output = IBertOutput(config)
+
+ self.pre_intermediate_act = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+ self.pre_output_act = QuantAct(self.act_bit, quant_mode=self.quant_mode)
+
+ def forward(
+ self,
+ hidden_states,
+ hidden_states_scaling_factor,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ self_attention_outputs, self_attention_outputs_scaling_factor = self.attention(
+ hidden_states,
+ hidden_states_scaling_factor,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ attention_output_scaling_factor = self_attention_outputs_scaling_factor[0]
+
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ layer_output, layer_output_scaling_factor = self.feed_forward_chunk(
+ attention_output, attention_output_scaling_factor
+ )
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output, attention_output_scaling_factor):
+ attention_output, attention_output_scaling_factor = self.pre_intermediate_act(
+ attention_output, attention_output_scaling_factor
+ )
+ intermediate_output, intermediate_output_scaling_factor = self.intermediate(
+ attention_output, attention_output_scaling_factor
+ )
+
+ intermediate_output, intermediate_output_scaling_factor = self.pre_output_act(
+ intermediate_output, intermediate_output_scaling_factor
+ )
+ layer_output, layer_output_scaling_factor = self.output(
+ intermediate_output, intermediate_output_scaling_factor, attention_output, attention_output_scaling_factor
+ )
+ return layer_output, layer_output_scaling_factor
+
+
+class IBertEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.quant_mode = config.quant_mode
+ self.layer = nn.ModuleList([IBertLayer(config) for _ in range(config.num_hidden_layers)])
+
+ def forward(
+ self,
+ hidden_states,
+ hidden_states_scaling_factor,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = None # `config.add_cross_attention` is not supported
+ next_decoder_cache = None # `config.use_cache` is not supported
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ layer_outputs = layer_module(
+ hidden_states,
+ hidden_states_scaling_factor,
+ attention_mask,
+ layer_head_mask,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class IBertPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.quant_mode = config.quant_mode
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class IBertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = IBertConfig
+ base_model_prefix = "ibert"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (QuantLinear, nn.Linear)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, (QuantEmbedding, nn.Embedding)):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, (IntLayerNorm, nn.LayerNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ def resize_token_embeddings(self, new_num_tokens=None):
+ raise NotImplementedError("`resize_token_embeddings` is not supported for I-BERT.")
+
+
+IBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`IBertConfig`]): Model configuration class with all the parameters of the
+ model. Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+IBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare I-BERT Model transformer outputting raw hidden-states without any specific head on top.",
+ IBERT_START_DOCSTRING,
+)
+class IBertModel(IBertPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ """
+
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+ self.quant_mode = config.quant_mode
+
+ self.embeddings = IBertEmbeddings(config)
+ self.encoder = IBertEncoder(config)
+
+ self.pooler = IBertPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length)), device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output, embedding_output_scaling_factor = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ embedding_output_scaling_factor,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings("""I-BERT Model with a `language modeling` head on top.""", IBERT_START_DOCSTRING)
+class IBertForMaskedLM(IBertPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder.bias", "lm_head.decoder.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.ibert = IBertModel(config, add_pooling_layer=False)
+ self.lm_head = IBertLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ mask="",
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
+ Used to hide legacy arguments that have been deprecated.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ibert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class IBertLMHead(nn.Module):
+ """I-BERT Head for masked language modeling."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ self.decoder.bias = self.bias
+
+ def forward(self, features, **kwargs):
+ x = self.dense(features)
+ x = gelu(x)
+ x = self.layer_norm(x)
+
+ # project back to size of vocabulary with bias
+ x = self.decoder(x)
+
+ return x
+
+ def _tie_weights(self):
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
+ self.bias = self.decoder.bias
+
+
+@add_start_docstrings(
+ """
+ I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ IBERT_START_DOCSTRING,
+)
+class IBertForSequenceClassification(IBertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.ibert = IBertModel(config, add_pooling_layer=False)
+ self.classifier = IBertClassificationHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ibert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
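+ # Infer `problem_type` the first time labels are seen: regression for a single label,
+ # single-label classification for integer labels, otherwise multi-label classification.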
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ I-BERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ IBERT_START_DOCSTRING,
+)
+class IBertForMultipleChoice(IBertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.ibert = IBertModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
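+ # Choice inputs arrive as (batch_size, num_choices, seq_len); flatten them to
+ # (batch_size * num_choices, seq_len) for the encoder, then reshape the logits back below.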
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ flat_inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.ibert(
+ flat_input_ids,
+ position_ids=flat_position_ids,
+ token_type_ids=flat_token_type_ids,
+ attention_mask=flat_attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ I-BERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ IBERT_START_DOCSTRING,
+)
+class IBertForTokenClassification(IBertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.ibert = IBertModel(config, add_pooling_layer=False)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ibert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class IBertClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
+
+ def forward(self, features, **kwargs):
+        hidden_states = features[:, 0, :]  # take <s> token (equiv. to [CLS])
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.out_proj(hidden_states)
+ return hidden_states
+
+
+@add_start_docstrings(
+ """
+    I-BERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
+    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ IBERT_START_DOCSTRING,
+)
+class IBertForQuestionAnswering(IBertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.ibert = IBertModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]:
+ r"""
+        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ibert(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, splitting adds a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+            # sometimes the start/end positions are outside our model inputs; we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's *utils.make_positions*.
+
+ Args:
+ input_ids (`torch.LongTensor`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
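+
+
+# A minimal sanity-check sketch (illustrative values, assuming `padding_idx=1` as in
+# the RoBERTa-style embeddings I-BERT builds on):
+#
+#     >>> input_ids = torch.tensor([[5, 7, 1, 1]])
+#     >>> create_position_ids_from_input_ids(input_ids, padding_idx=1)
+#     tensor([[2, 3, 1, 1]])
+#
+# Non-padding tokens get positions counted from `padding_idx + 1`, while padding
+# tokens keep `padding_idx` itself as their position id.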
diff --git a/venv/lib/python3.10/site-packages/transformers/models/ibert/quant_modules.py b/venv/lib/python3.10/site-packages/transformers/models/ibert/quant_modules.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e2f123c578c0b4840b6d0e52d61af891abcd41d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/ibert/quant_modules.py
@@ -0,0 +1,820 @@
+# coding=utf-8
+# Copyright 2021 The I-BERT Authors (Sehoon Kim, Amir Gholami, Zhewei Yao,
+# Michael Mahoney, Kurt Keutzer - UC Berkeley) and The HuggingFace Inc. team.
+# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import decimal
+
+import numpy as np
+import torch
+from torch import nn
+from torch.autograd import Function
+
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class QuantEmbedding(nn.Module):
+ """
+ Quantized version of `torch.nn.Embedding`. Adds quantization-specific arguments on top of `torch.nn.Embedding`.
+
+ Args:
+ weight_bit (`int`, *optional*, defaults to `8`):
+ Bitwidth for the quantized weight.
+ momentum (`float`, *optional*, defaults to `0.95`):
+ Momentum for updating the activation quantization range.
+ quant_mode (`bool`, *optional*, defaults to `False`):
+ Whether or not the layer is quantized.
+ """
+
+ def __init__(
+ self,
+ num_embeddings,
+ embedding_dim,
+ padding_idx=None,
+ max_norm=None,
+ norm_type=2.0,
+ scale_grad_by_freq=False,
+ sparse=False,
+ _weight=None,
+ weight_bit=8,
+ momentum=0.95,
+ quant_mode=False,
+ ):
+ super().__init__()
+ self.num_ = num_embeddings
+ self.dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.max_norm = max_norm
+ self.norm_type = norm_type
+ self.scale_grad_by_freq = scale_grad_by_freq
+ self.sparse = sparse
+
+ self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
+ self.register_buffer("weight_scaling_factor", torch.zeros(1))
+ self.register_buffer("weight_integer", torch.zeros_like(self.weight))
+
+ self.weight_bit = weight_bit
+ self.momentum = momentum
+ self.quant_mode = quant_mode
+ self.percentile_mode = False
+ self.weight_function = SymmetricQuantFunction.apply
+
+ def forward(self, x, positions=None, incremental_state=None):
+ if not self.quant_mode:
+ return (
+ nn.functional.embedding(
+ x,
+ self.weight,
+ self.padding_idx,
+ self.max_norm,
+ self.norm_type,
+ self.scale_grad_by_freq,
+ self.sparse,
+ ),
+ None,
+ )
+
+ w = self.weight
+ w_transform = w.data.detach()
+ w_min = w_transform.min().expand(1)
+ w_max = w_transform.max().expand(1)
+
+ self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False)
+ self.weight_integer = self.weight_function(
+ self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor
+ )
+
+ emb_int = nn.functional.embedding(
+ x,
+ self.weight_integer,
+ self.padding_idx,
+ self.max_norm,
+ self.norm_type,
+ self.scale_grad_by_freq,
+ self.sparse,
+ )
+ return emb_int * self.weight_scaling_factor, self.weight_scaling_factor
+
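+# A minimal usage sketch (hypothetical values; with `quant_mode=False` the layer
+# behaves like `nn.Embedding` and returns `None` as the scaling factor):
+#
+#     >>> emb = QuantEmbedding(num_embeddings=10, embedding_dim=4, weight_bit=8, quant_mode=True)
+#     >>> out, scale = emb(torch.tensor([[1, 2, 3]]))
+#     >>> out.shape
+#     torch.Size([1, 3, 4])
+#
+# `out` is the dequantized embedding (integer weights times `scale`), where `scale`
+# is a single symmetric scaling factor shared by the whole weight matrix.
+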
+
+class QuantAct(nn.Module):
+ """
+ Quantizes the given activation.
+
+ Args:
+ activation_bit (`int`):
+ Bitwidth for the quantized activation.
+ act_range_momentum (`float`, *optional*, defaults to `0.95`):
+ Momentum for updating the activation quantization range.
+ per_channel (`bool`, *optional*, defaults to `False`):
+            Whether or not to use channel-wise quantization.
+        channel_len (`int`, *optional*):
+            Specify the channel length when *per_channel* is set to `True`.
+ quant_mode (`bool`, *optional*, defaults to `False`):
+ Whether or not the layer is quantized.
+ """
+
+ def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False):
+ super().__init__()
+
+ self.activation_bit = activation_bit
+ self.act_range_momentum = act_range_momentum
+ self.quant_mode = quant_mode
+ self.per_channel = per_channel
+ self.percentile = False
+ self.act_function = SymmetricQuantFunction.apply
+
+ if not self.per_channel:
+ self.register_buffer("x_min", torch.zeros(1))
+ self.register_buffer("x_max", torch.zeros(1))
+ self.register_buffer("act_scaling_factor", torch.zeros(1))
+ self.x_min -= 1e-5
+ self.x_max += 1e-5
+ else:
+ raise NotImplementedError("per-channel mode is not currently supported for activation.")
+
+ def __repr__(self):
+ return (
+ f"{self.__class__.__name__}(activation_bit={self.activation_bit}, "
+ f"quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, "
+ f"Act_max: {self.x_max.item():.2f})"
+ )
+
+ def forward(
+ self,
+ x,
+ pre_act_scaling_factor=None,
+ identity=None,
+ identity_scaling_factor=None,
+ specified_min=None,
+ specified_max=None,
+ ):
+ x_act = x if identity is None else identity + x
+ # collect running stats if training
+ if self.training:
+ assert not self.percentile, "percentile mode is not currently supported for activation."
+ assert not self.per_channel, "per-channel mode is not currently supported for activation."
+ x_min = x_act.data.min()
+ x_max = x_act.data.max()
+
+ assert (
+ x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0
+ ), "NaN detected when computing min/max of the activation"
+
+ # Initialization
+ if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:
+ self.x_min = self.x_min + x_min
+ self.x_max = self.x_max + x_max
+
+ # exponential moving average (EMA)
+ # use momentum to prevent the quantized values change greatly every iteration
+ elif self.act_range_momentum == -1:
+ self.x_min = torch.min(self.x_min, x_min)
+ self.x_max = torch.max(self.x_max, x_max)
+ else:
+ self.x_min = self.x_min * self.act_range_momentum + x_min * (1 - self.act_range_momentum)
+ self.x_max = self.x_max * self.act_range_momentum + x_max * (1 - self.act_range_momentum)
+
+ if not self.quant_mode:
+ return x_act, None
+
+ x_min = self.x_min if specified_min is None else specified_min
+ x_max = self.x_max if specified_max is None else specified_max
+
+ self.act_scaling_factor = symmetric_linear_quantization_params(
+ self.activation_bit, x_min, x_max, per_channel=self.per_channel
+ )
+
+ if pre_act_scaling_factor is None:
+ # this is for the input quantization
+ quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor)
+ else:
+ quant_act_int = FixedPointMul.apply(
+ x,
+ pre_act_scaling_factor,
+ self.activation_bit,
+ self.act_scaling_factor,
+ identity,
+ identity_scaling_factor,
+ )
+
+ correct_output_scale = self.act_scaling_factor.view(-1)
+
+ return quant_act_int * correct_output_scale, self.act_scaling_factor
+
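+# A minimal calibration sketch (hypothetical tensors): in training mode the module
+# first updates its running activation range with an exponential moving average and
+# then, when `quant_mode=True`, quantizes with the resulting scaling factor.
+#
+#     >>> act = QuantAct(activation_bit=8, act_range_momentum=0.95, quant_mode=True)
+#     >>> act = act.train()
+#     >>> y, scale = act(torch.randn(2, 4))
+#     >>> # `y` is the fake-quantized activation; `scale` its quantization step size.
+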
+
+class QuantLinear(nn.Module):
+ """
+ Quantized version of `torch.nn.Linear`. Adds quantization-specific arguments on top of `torch.nn.Linear`.
+
+ Args:
+ weight_bit (`int`, *optional*, defaults to `8`):
+ Bitwidth for the quantized weight.
+ bias_bit (`int`, *optional*, defaults to `32`):
+ Bitwidth for the quantized bias.
+ per_channel (`bool`, *optional*, defaults to `False`):
+ Whether or not to use channel-wise quantization.
+ quant_mode (`bool`, *optional*, defaults to `False`):
+ Whether or not the layer is quantized.
+ """
+
+ def __init__(
+ self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False
+ ):
+ super().__init__()
+ self.in_features = in_features
+ self.out_features = out_features
+
+ self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
+ self.register_buffer("weight_integer", torch.zeros_like(self.weight))
+ self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features))
+ if bias:
+ self.bias = nn.Parameter(torch.zeros(out_features))
+ self.register_buffer("bias_integer", torch.zeros_like(self.bias))
+
+ self.weight_bit = weight_bit
+        self.quant_mode = quant_mode
+        self.per_channel = per_channel
+        self.bias_bit = bias_bit
+ self.percentile_mode = False
+ self.weight_function = SymmetricQuantFunction.apply
+
+ def __repr__(self):
+ s = super().__repr__()
+ s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})"
+ return s
+
+ def forward(self, x, prev_act_scaling_factor=None):
+ if not self.quant_mode:
+ return nn.functional.linear(x, weight=self.weight, bias=self.bias), None
+
+ # assert that prev_act_scaling_factor is a scalar tensor
+ assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (
+ "Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. "
+            "Please add a QuantAct layer with `per_channel = False` before this QuantLinear layer"
+ )
+
+ w = self.weight
+ w_transform = w.data.detach()
+ if self.per_channel:
+ w_min, _ = torch.min(w_transform, dim=1, out=None)
+ w_max, _ = torch.max(w_transform, dim=1, out=None)
+ else:
+ w_min = w_transform.min().expand(1)
+ w_max = w_transform.max().expand(1)
+
+ self.fc_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, self.per_channel)
+ self.weight_integer = self.weight_function(
+ self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor
+ )
+
+ bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor
+
+ if self.bias is not None:
+ self.bias_integer = self.weight_function(self.bias, self.bias_bit, False, bias_scaling_factor)
+
+ prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
+ x_int = x / prev_act_scaling_factor
+
+ return (
+ nn.functional.linear(x_int, weight=self.weight_integer, bias=self.bias_integer) * bias_scaling_factor,
+ bias_scaling_factor,
+ )
+
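+# A minimal usage sketch (hypothetical shapes): the input must arrive with a global
+# (scalar) activation scaling factor, e.g. produced by a preceding `QuantAct` layer.
+#
+#     >>> fc = QuantLinear(in_features=4, out_features=3, weight_bit=8, quant_mode=True)
+#     >>> x, act_scale = torch.randn(2, 4), torch.tensor([0.1])
+#     >>> y, out_scale = fc(x, prev_act_scaling_factor=act_scale)
+#     >>> # `out_scale` = weight scale * `act_scale`, the scale of the integer accumulator.
+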
+
+class IntGELU(nn.Module):
+ """
+ Quantized version of `torch.nn.GELU`. Adds quantization-specific arguments on top of `torch.nn.GELU`.
+
+ Args:
+        quant_mode (`bool`, *optional*, defaults to `True`):
+ Whether or not the layer is quantized.
+ force_dequant (`str`, *optional*, defaults to `"none"`):
+ Force dequantize the layer if either "gelu" or "nonlinear" is given.
+ """
+
+ def __init__(self, quant_mode=True, force_dequant="none"):
+ super().__init__()
+ self.quant_mode = quant_mode
+
+ if force_dequant in ["nonlinear", "gelu"]:
+ logger.info("Force dequantize gelu")
+ self.quant_mode = False
+
+ if not self.quant_mode:
+ self.activation_fn = nn.GELU()
+
+ self.k = 1.4142
+ self.const = 14 # dummy integer constant
+ self.coeff = [-0.2888, -1.769, 1] # a(x+b)**2 + c
+ self.coeff[2] /= self.coeff[0]
+
+ def int_erf(self, x_int, scaling_factor):
+ b_int = torch.floor(self.coeff[1] / scaling_factor)
+ c_int = torch.floor(self.coeff[2] / scaling_factor**2)
+ sign = torch.sign(x_int)
+
+ abs_int = torch.min(torch.abs(x_int), -b_int)
+ y_int = sign * ((abs_int + b_int) ** 2 + c_int)
+ scaling_factor = scaling_factor**2 * self.coeff[0]
+
+ # avoid overflow
+ y_int = floor_ste.apply(y_int / 2**self.const)
+ scaling_factor = scaling_factor * 2**self.const
+
+ return y_int, scaling_factor
+
+ def forward(self, x, scaling_factor=None):
+ if not self.quant_mode:
+ return self.activation_fn(x), None
+
+ x_int = x / scaling_factor
+ sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)
+
+ shift_int = 1.0 // sigmoid_scaling_factor
+
+ x_int = x_int * (sigmoid_int + shift_int)
+ scaling_factor = scaling_factor * sigmoid_scaling_factor / 2
+
+ return x_int * scaling_factor, scaling_factor
+
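+# A rough accuracy sketch (illustrative): i-GELU replaces erf with a clipped
+# second-order polynomial, so its output should track the floating-point GELU closely.
+#
+#     >>> gelu_int = IntGELU(quant_mode=True)
+#     >>> x, scale = torch.randn(8), torch.tensor(0.01)
+#     >>> y, _ = gelu_int(x, scaling_factor=scale)
+#     >>> # expect y to approximate nn.GELU()(x) up to quantization error
+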
+
+class IntSoftmax(nn.Module):
+ """
+ Quantized version of `torch.nn.Softmax`. Adds quantization-specific arguments on top of `torch.nn.Softmax`.
+
+ Args:
+ output_bit (`int`):
+ Bitwidth for the layer output activation.
+ quant_mode (`bool`, *optional*, defaults to `False`):
+ Whether or not the layer is quantized.
+ force_dequant (`str`, *optional*, defaults to `"none"`):
+ Force dequantize the layer if either "softmax" or "nonlinear" is given.
+ """
+
+ def __init__(self, output_bit, quant_mode=False, force_dequant="none"):
+ super().__init__()
+ self.output_bit = output_bit
+ self.max_bit = 32
+ self.quant_mode = quant_mode
+
+ if force_dequant in ["nonlinear", "softmax"]:
+ logger.info("Force dequantize softmax")
+ self.quant_mode = False
+
+ self.act = QuantAct(16, quant_mode=self.quant_mode)
+ self.x0 = -0.6931 # -ln2
+ self.const = 30 # dummy integer constant
+ self.coef = [0.35815147, 0.96963238, 1.0] # ax**2 + bx + c
+ self.coef[1] /= self.coef[0]
+ self.coef[2] /= self.coef[0]
+
+ def int_polynomial(self, x_int, scaling_factor):
+ with torch.no_grad():
+ b_int = torch.floor(self.coef[1] / scaling_factor)
+ c_int = torch.floor(self.coef[2] / scaling_factor**2)
+ z = (x_int + b_int) * x_int + c_int
+ scaling_factor = self.coef[0] * scaling_factor**2
+ return z, scaling_factor
+
+ def int_exp(self, x_int, scaling_factor):
+ with torch.no_grad():
+ x0_int = torch.floor(self.x0 / scaling_factor)
+ x_int = torch.max(x_int, self.const * x0_int)
+
+ q = floor_ste.apply(x_int / x0_int)
+ r = x_int - x0_int * q
+ exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
+ exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
+ scaling_factor = exp_scaling_factor / 2**self.const
+ return exp_int, scaling_factor
+
+ def forward(self, x, scaling_factor):
+ if not self.quant_mode:
+ return nn.functional.softmax(x, dim=-1), None
+
+ x_int = x / scaling_factor
+
+ x_int_max, _ = x_int.max(dim=-1, keepdim=True)
+ x_int = x_int - x_int_max
+ exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)
+
+ # Avoid overflow
+ exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
+ exp_int = exp / exp_scaling_factor
+
+ exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
+ factor = floor_ste.apply(2**self.max_bit / exp_int_sum)
+ exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
+ scaling_factor = 1 / 2**self.output_bit
+ return exp_int * scaling_factor, scaling_factor
+
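+# A minimal sanity-check sketch (illustrative): like the floating-point softmax it
+# approximates, i-Softmax outputs should sum to roughly one along the last dimension.
+#
+#     >>> sm = IntSoftmax(output_bit=8, quant_mode=True)
+#     >>> x, scale = torch.randn(2, 5), torch.tensor(0.05)
+#     >>> probs, out_scale = sm(x, scale)
+#     >>> # probs.sum(dim=-1) is approximately a tensor of ones
+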
+
+class IntLayerNorm(nn.Module):
+ """
+ Quantized version of `torch.nn.LayerNorm`. Adds quantization-specific arguments on top of `torch.nn.LayerNorm`.
+
+ Args:
+ output_bit (`int`, *optional*, defaults to `8`):
+ Bitwidth for the layer output activation.
+ quant_mode (`bool`, *optional*, defaults to `False`):
+ Whether or not the layer is quantized.
+ force_dequant (`str`, *optional*, defaults to `"none"`):
+ Force dequantize the layer if either "layernorm" or "nonlinear" is given.
+ """
+
+ def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"):
+ super().__init__()
+ self.normalized_shape = normalized_shape
+ self.eps = eps
+
+ self.weight = nn.Parameter(torch.zeros(normalized_shape))
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
+
+ self.quant_mode = quant_mode
+ if force_dequant in ["nonlinear", "layernorm"]:
+ logger.info("Force dequantize layernorm")
+ self.quant_mode = False
+
+ self.register_buffer("shift", torch.zeros(1))
+ self.output_bit = output_bit
+ self.max_bit = 32
+ self.dim_sqrt = None
+ self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)
+
+ def set_shift(self, y_int):
+ with torch.no_grad():
+ y_sq_int = y_int**2
+ var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
+ shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()
+ shift_old = self.shift
+ self.shift = torch.max(self.shift, shift)
+ logger.info(f"Dynamic shift adjustment: {int(shift_old)} -> {int(self.shift)}")
+
+ def overflow_fallback(self, y_int):
+ """
+ This fallback function is called when overflow is detected during training time, and adjusts the `self.shift`
+ to avoid overflow in the subsequent runs.
+ """
+ self.set_shift(y_int) # adjusts `self.shift`
+ y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
+ y_sq_int = y_int_shifted**2
+ var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
+ return var_int
+
+ def forward(self, x, scaling_factor=None):
+ if not self.quant_mode:
+ mean = x.mean(axis=2, keepdim=True)
+ y = x - mean
+ var = torch.mean(y**2, axis=2, keepdim=True)
+ x = y / torch.sqrt(self.eps + var)
+ x = x * self.weight + self.bias
+ return x, None
+
+ # compute sqrt of the feature dimension if it is the first run
+ if self.dim_sqrt is None:
+ n = torch.tensor(x.shape[2], dtype=torch.float)
+ self.dim_sqrt = torch.sqrt(n).to(x.device)
+
+ # Normalization: computes mean and variance(std)
+ x_int = x / scaling_factor
+ mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
+ y_int = x_int - mean_int
+ y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
+ y_sq_int = y_int_shifted**2
+ var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
+
+ # overflow handling in training time
+ if self.training:
+ # if overflow is detected
+ if var_int.max() >= 2**self.max_bit:
+ var_int = self.overflow_fallback(y_int)
+ assert var_int.max() < 2**self.max_bit + 0.1, (
+ "Error detected in overflow handling: "
+ "`var_int` exceeds `self.max_bit` (the maximum possible bit width)"
+ )
+
+ # To be replaced with integer-sqrt kernel that produces the same output
+ std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift
+ factor = floor_ste.apply(2**31 / std_int)
+ y_int = floor_ste.apply(y_int * factor / 2)
+ scaling_factor = self.dim_sqrt / 2**30
+
+ # scaling and shifting
+ bias = self.bias.data.detach() / (self.weight.data.detach())
+ bias_int = floor_ste.apply(bias / scaling_factor)
+
+ y_int = y_int + bias_int
+ scaling_factor = scaling_factor * self.weight
+ x = y_int * scaling_factor
+
+ return x, scaling_factor
+
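+# A minimal usage sketch (hypothetical shapes): inputs are expected as
+# `(batch, seq_len, hidden)` since the statistics are computed over `axis=2`. Note
+# that `weight` is initialized to zeros here, so it must be set before quantized use.
+#
+#     >>> ln = IntLayerNorm(normalized_shape=4, eps=1e-12, quant_mode=True)
+#     >>> ln.weight.data.fill_(1.0)
+#     >>> x, scale = torch.randn(1, 3, 4), torch.tensor(0.01)
+#     >>> y, out_scale = ln(x, scaling_factor=scale)
+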
+
+def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
+ """
+    Calculate the percentile max and min values in a given tensor.
+
+ Args:
+ input (`torch.Tensor`):
+ The target tensor to calculate percentile max and min.
+ lower_percentile (`float`):
+            If set to 0.1, the value at the boundary of the smallest 0.1% of entries is returned as the percentile min.
+        upper_percentile (`float`):
+            If set to 99.9, the value at the boundary of the largest 0.1% of entries is returned as the percentile max.
+ output_tensor (`bool`, *optional*, defaults to `False`):
+ If True, this function returns tensors, otherwise it returns values.
+
+ Returns:
+ `Tuple(torch.Tensor, torch.Tensor)`: Percentile min and max value of *input*
+ """
+ input_length = input.shape[0]
+
+ lower_index = round(input_length * (1 - lower_percentile * 0.01))
+ upper_index = round(input_length * upper_percentile * 0.01)
+
+ upper_bound = torch.kthvalue(input, k=upper_index).values
+
+ if lower_percentile == 0:
+ lower_bound = upper_bound * 0
+ # lower_index += 1
+ else:
+ lower_bound = -torch.kthvalue(-input, k=lower_index).values
+
+ if not output_tensor:
+ lower_bound = lower_bound.item()
+ upper_bound = upper_bound.item()
+ return lower_bound, upper_bound
+
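+# A worked example (illustrative): with the 100 values 1..100, trimming 1% from each
+# side returns the 2nd smallest and 99th smallest values as the range.
+#
+#     >>> t = torch.arange(1.0, 101.0)
+#     >>> get_percentile_min_max(t, lower_percentile=1.0, upper_percentile=99.0)
+#     (2.0, 99.0)
+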
+
+def linear_quantize(input, scale, zero_point, inplace=False):
+ """
+ Quantize single-precision input tensor to integers with the given scaling factor and zeropoint.
+
+ Args:
+ input (`torch.Tensor`):
+ Single-precision input tensor to be quantized.
+ scale (`torch.Tensor`):
+ Scaling factor for quantization.
+        zero_point (`torch.Tensor`):
+ Shift for quantization.
+ inplace (`bool`, *optional*, defaults to `False`):
+ Whether to compute inplace or not.
+
+ Returns:
+ `torch.Tensor`: Linearly quantized value of *input* according to *scale* and *zero_point*.
+ """
+ # reshape scale and zeropoint for convolutional weights and activation
+ if len(input.shape) == 4:
+ scale = scale.view(-1, 1, 1, 1)
+ zero_point = zero_point.view(-1, 1, 1, 1)
+ # reshape scale and zeropoint for linear weights
+ elif len(input.shape) == 2:
+ scale = scale.view(-1, 1)
+ zero_point = zero_point.view(-1, 1)
+ else:
+ scale = scale.view(-1)
+ zero_point = zero_point.view(-1)
+ # quantized = float / scale + zero_point
+ if inplace:
+ input.mul_(1.0 / scale).add_(zero_point).round_()
+ return input
+ return torch.round(1.0 / scale * input + zero_point)
+
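+# A worked example (illustrative): quantized = round(input / scale + zero_point).
+#
+#     >>> x = torch.tensor([0.25, -0.5, 1.0])
+#     >>> linear_quantize(x, scale=torch.tensor([0.25]), zero_point=torch.tensor([0.0]))
+#     tensor([ 1., -2.,  4.])
+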
+
+def symmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, per_channel=False):
+ """
+ Compute the scaling factor with the given quantization range for symmetric quantization.
+
+ Args:
+ saturation_min (`torch.Tensor`):
+ Lower bound for quantization range.
+ saturation_max (`torch.Tensor`):
+ Upper bound for quantization range.
+ per_channel (`bool`, *optional*, defaults to `False`):
+            Whether or not to use channel-wise quantization.
+
+ Returns:
+ `torch.Tensor`: Scaling factor that linearly quantizes the given range between *saturation_min* and
+ *saturation_max*.
+ """
+ # in this part, we do not need any gradient computation,
+ # in order to enforce this, we put torch.no_grad()
+ with torch.no_grad():
+ n = 2 ** (num_bits - 1) - 1
+
+ if per_channel:
+ scale, _ = torch.max(torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1)
+ scale = torch.clamp(scale, min=1e-8) / n
+
+ else:
+ scale = max(saturation_min.abs(), saturation_max.abs())
+ scale = torch.clamp(scale, min=1e-8) / n
+
+ return scale
+
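+# A worked example (illustrative): for 8 bits, n = 2**7 - 1 = 127 and the scale is
+# max(|saturation_min|, |saturation_max|) / n.
+#
+#     >>> symmetric_linear_quantization_params(8, torch.tensor([-0.5]), torch.tensor([1.0]))
+#     tensor([0.0079])  # = 1.0 / 127
+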
+
+class SymmetricQuantFunction(Function):
+ """
+ Class to quantize the given floating-point values using symmetric quantization with given range and bitwidth.
+ """
+
+ @staticmethod
+ def forward(ctx, x, k, percentile_mode, scale):
+ """
+ Args:
+ x (`torch.Tensor`):
+ Floating point tensor to be quantized.
+ k (`int`):
+ Quantization bitwidth.
+ percentile_mode (`bool`):
+ Whether or not to use percentile calibration.
+ scale (`torch.Tensor`):
+ Pre-calculated scaling factor for *x*. Note that the current implementation of SymmetricQuantFunction
+ requires pre-calculated scaling factor.
+
+ Returns:
+ `torch.Tensor`: Symmetric-quantized value of *input*.
+ """
+ zero_point = torch.tensor(0.0).to(scale.device)
+
+ n = 2 ** (k - 1) - 1
+ new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
+ new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
+
+ ctx.scale = scale
+ return new_quant_x
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ scale = ctx.scale
+ if len(grad_output.shape) == 4:
+ scale = scale.view(-1, 1, 1, 1)
+ # reshape scale and zeropoint for linear weights
+ elif len(grad_output.shape) == 2:
+ scale = scale.view(-1, 1)
+ else:
+ scale = scale.view(-1)
+
+ return grad_output.clone() / scale, None, None, None, None
+
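+# A worked example (illustrative): 8-bit symmetric quantization with a pre-computed
+# scale; values are rounded to integer grid points and clamped to [-n, n - 1].
+#
+#     >>> x = torch.tensor([0.5, -1.0, 2.0])
+#     >>> scale = torch.tensor([2.0 / 127])
+#     >>> SymmetricQuantFunction.apply(x, 8, False, scale)
+#     tensor([ 32., -64., 126.])
+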
+
+class floor_ste(Function):
+ """
+    Straight-through Estimator (STE) for `torch.floor()`
+ """
+
+ @staticmethod
+ def forward(ctx, x):
+ return torch.floor(x)
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ return grad_output.clone()
+
+
+class round_ste(Function):
+ """
+    Straight-through Estimator (STE) for `torch.round()`
+ """
+
+ @staticmethod
+ def forward(ctx, x):
+ return torch.round(x)
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ return grad_output.clone()
+
+
+def batch_frexp(inputs, max_bit=31):
+ """
+ Decompose the scaling factor into mantissa and twos exponent.
+
+ Args:
+        inputs (`torch.Tensor`):
+            Target scaling factor to decompose.
+
+    Returns:
+        `Tuple(torch.Tensor, torch.Tensor)`: mantissa and exponent
+ """
+
+ shape_of_input = inputs.size()
+
+    # flatten the input to a 1-d tensor
+ inputs = inputs.view(-1)
+
+ output_m, output_e = np.frexp(inputs.cpu().numpy())
+ tmp_m = []
+ for m in output_m:
+ int_m_shifted = int(
+ decimal.Decimal(m * (2**max_bit)).quantize(decimal.Decimal("1"), rounding=decimal.ROUND_HALF_UP)
+ )
+ tmp_m.append(int_m_shifted)
+ output_m = np.array(tmp_m)
+
+ output_e = float(max_bit) - output_e
+
+ return (
+ torch.from_numpy(output_m).to(inputs.device).view(shape_of_input),
+ torch.from_numpy(output_e).to(inputs.device).view(shape_of_input),
+ )
+
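+# A worked example (illustrative): a scaling factor s is decomposed as s = m / 2**e
+# with the mantissa rescaled to a 31-bit integer, so fixed-point hardware can
+# multiply by `m` and shift right by `e`.
+#
+#     >>> m, e = batch_frexp(torch.tensor([0.75]))
+#     >>> int(m), float(e)  # 0.75 == 1610612736 / 2**31
+#     (1610612736, 31.0)
+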
+
+class FixedPointMul(Function):
+ """
+ Function to perform fixed-point arithmetic that can match integer arithmetic on hardware.
+
+ Args:
+ pre_act (`torch.Tensor`):
+ Input tensor.
+ pre_act_scaling_factor (`torch.Tensor`):
+ Scaling factor of the input tensor *pre_act*.
+ bit_num (`int`):
+ Quantization bitwidth.
+ z_scaling_factor (`torch.Tensor`):
+ Scaling factor of the output tensor.
+ identity (`torch.Tensor`, *optional*):
+ Identity tensor, if exists.
+ identity_scaling_factor (`torch.Tensor`, *optional*):
+ Scaling factor of the identity tensor *identity*, if exists.
+
+ Returns:
+        `torch.Tensor`: Output tensor (*pre_act* if *identity* is not given, otherwise the addition of *pre_act* and
+ *identity*), whose scale is rescaled to *z_scaling_factor*.
+ """
+
+ @staticmethod
+ def forward(
+ ctx,
+ pre_act,
+ pre_act_scaling_factor,
+ bit_num,
+ z_scaling_factor,
+ identity=None,
+ identity_scaling_factor=None,
+ ):
+ if len(pre_act_scaling_factor.shape) == 3:
+ reshape = lambda x: x # noqa: E731
+ else:
+ reshape = lambda x: x.view(1, 1, -1) # noqa: E731
+ ctx.identity = identity
+
+ n = 2 ** (bit_num - 1) - 1
+
+ with torch.no_grad():
+ pre_act_scaling_factor = reshape(pre_act_scaling_factor)
+ if identity is not None:
+ identity_scaling_factor = reshape(identity_scaling_factor)
+
+ ctx.z_scaling_factor = z_scaling_factor
+
+ z_int = torch.round(pre_act / pre_act_scaling_factor)
+ _A = pre_act_scaling_factor.type(torch.double)
+ _B = (z_scaling_factor.type(torch.float)).type(torch.double)
+ new_scale = _A / _B
+ new_scale = reshape(new_scale)
+
+ m, e = batch_frexp(new_scale)
+
+ output = z_int.type(torch.double) * m.type(torch.double)
+ output = torch.round(output / (2.0**e))
+
+ if identity is not None:
+ # needs addition of identity activation
+ wx_int = torch.round(identity / identity_scaling_factor)
+
+ _A = identity_scaling_factor.type(torch.double)
+ _B = (z_scaling_factor.type(torch.float)).type(torch.double)
+ new_scale = _A / _B
+ new_scale = reshape(new_scale)
+
+ m1, e1 = batch_frexp(new_scale)
+ output1 = wx_int.type(torch.double) * m1.type(torch.double)
+ output1 = torch.round(output1 / (2.0**e1))
+
+ output = output1 + output
+
+ return torch.clamp(output.type(torch.float), -n - 1, n)
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ identity_grad = None
+ if ctx.identity is not None:
+ identity_grad = grad_output.clone() / ctx.z_scaling_factor
+ return grad_output.clone() / ctx.z_scaling_factor, None, None, None, None, identity_grad, None
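+
+
+# A worked requantization example (illustrative): `FixedPointMul` moves integer
+# representations from one scaling factor to another using a mantissa multiply and a
+# right shift, matching what integer-only hardware would execute.
+#
+#     >>> x = torch.tensor([[[0.5, -0.25]]])                 # real values on a 0.25 grid
+#     >>> s_in, s_out = torch.ones(1, 1, 1) * 0.25, torch.tensor(0.125)
+#     >>> FixedPointMul.apply(x, s_in, 8, s_out)             # integers on the 0.125 grid
+#     tensor([[[ 4., -2.]]])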
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/longformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..66ef7c953cff4385424b208313445962d4facf28
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longformer/__init__.py
@@ -0,0 +1,135 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_longformer": [
+ "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "LongformerConfig",
+ "LongformerOnnxConfig",
+ ],
+ "tokenization_longformer": ["LongformerTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_longformer"] = [
+ "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "LongformerForMaskedLM",
+ "LongformerForMultipleChoice",
+ "LongformerForQuestionAnswering",
+ "LongformerForSequenceClassification",
+ "LongformerForTokenClassification",
+ "LongformerModel",
+ "LongformerPreTrainedModel",
+ "LongformerSelfAttention",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_longformer"] = [
+ "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFLongformerForMaskedLM",
+ "TFLongformerForMultipleChoice",
+ "TFLongformerForQuestionAnswering",
+ "TFLongformerForSequenceClassification",
+ "TFLongformerForTokenClassification",
+ "TFLongformerModel",
+ "TFLongformerPreTrainedModel",
+ "TFLongformerSelfAttention",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_longformer import (
+ LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ LongformerConfig,
+ LongformerOnnxConfig,
+ )
+ from .tokenization_longformer import LongformerTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_longformer_fast import LongformerTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_longformer import (
+ LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ LongformerForMaskedLM,
+ LongformerForMultipleChoice,
+ LongformerForQuestionAnswering,
+ LongformerForSequenceClassification,
+ LongformerForTokenClassification,
+ LongformerModel,
+ LongformerPreTrainedModel,
+ LongformerSelfAttention,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_longformer import (
+ TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFLongformerForMaskedLM,
+ TFLongformerForMultipleChoice,
+ TFLongformerForQuestionAnswering,
+ TFLongformerForSequenceClassification,
+ TFLongformerForTokenClassification,
+ TFLongformerModel,
+ TFLongformerPreTrainedModel,
+ TFLongformerSelfAttention,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..797c6f67a190c6dba98bf7d4a01d4a2b2fb2160a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/configuration_longformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/configuration_longformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1cb5d28cbc7163a74ccddfe33a701e8006ae384b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/configuration_longformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/convert_longformer_original_pytorch_lightning_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/convert_longformer_original_pytorch_lightning_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1229930b8fe9b7c33c46ce7eec5343a9386d0ca
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/convert_longformer_original_pytorch_lightning_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e2b4dedef61e4d9b38c463c07c3f59c6a2c7cc7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_longformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_tf_longformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_tf_longformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c2aea927bb7fd1cbd506d96ecb2accba55f0542
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/modeling_tf_longformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eaa93e86e0836d9f6b5829c2f9275978e85a5a70
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9789b03e2134dcac4dc169ffec9328368d5abb12
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/longformer/__pycache__/tokenization_longformer_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/configuration_longformer.py b/venv/lib/python3.10/site-packages/transformers/models/longformer/configuration_longformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dce8a74a631c797297931278648508f01bafb4d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longformer/configuration_longformer.py
@@ -0,0 +1,203 @@
+# coding=utf-8
+# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Longformer configuration"""
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import TensorType, logging
+
+
+if TYPE_CHECKING:
+ from ...onnx.config import PatchingSpec
+ from ...tokenization_utils_base import PreTrainedTokenizerBase
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class LongformerConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`LongformerModel`] or a [`TFLongformerModel`]. It
+    is used to instantiate a Longformer model according to the specified arguments, defining the model architecture.
+    Instantiating a configuration with the defaults will yield a similar configuration to that of the LongFormer
+    [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) architecture with a sequence
+    length of 4,096.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the Longformer model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`LongformerModel`] or
+ [`TFLongformerModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ attention_window (`int` or `List[int]`, *optional*, defaults to 512):
+ Size of an attention window around each token. If an `int`, use the same size for all layers. To specify a
+ different window size for each layer, use a `List[int]` where `len(attention_window) == num_hidden_layers`.
+
+ Example:
+
+ ```python
+ >>> from transformers import LongformerConfig, LongformerModel
+
+ >>> # Initializing a Longformer configuration
+ >>> configuration = LongformerConfig()
+
+ >>> # Initializing a model from the configuration
+ >>> model = LongformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "longformer"
+
+ def __init__(
+ self,
+ attention_window: Union[List[int], int] = 512,
+ sep_token_id: int = 2,
+ pad_token_id: int = 1,
+ bos_token_id: int = 0,
+ eos_token_id: int = 2,
+ vocab_size: int = 30522,
+ hidden_size: int = 768,
+ num_hidden_layers: int = 12,
+ num_attention_heads: int = 12,
+ intermediate_size: int = 3072,
+ hidden_act: str = "gelu",
+ hidden_dropout_prob: float = 0.1,
+ attention_probs_dropout_prob: float = 0.1,
+ max_position_embeddings: int = 512,
+ type_vocab_size: int = 2,
+ initializer_range: float = 0.02,
+ layer_norm_eps: float = 1e-12,
+ onnx_export: bool = False,
+ **kwargs,
+ ):
+ """Constructs LongformerConfig."""
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.attention_window = attention_window
+ self.sep_token_id = sep_token_id
+ self.bos_token_id = bos_token_id
+ self.eos_token_id = eos_token_id
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.onnx_export = onnx_export
+
+
+class LongformerOnnxConfig(OnnxConfig):
+ def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
+ super().__init__(config, task, patching_specs)
+ config.onnx_export = True
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ("global_attention_mask", dynamic_axis),
+ ]
+ )
+
+ @property
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ outputs = super().outputs
+ if self.task == "default":
+ outputs["pooler_output"] = {0: "batch"}
+ return outputs
+
+ @property
+ def atol_for_validation(self) -> float:
+ """
+ What absolute tolerance value to use during model conversion validation.
+
+ Returns:
+ Float absolute tolerance value.
+ """
+ return 1e-4
+
+ @property
+ def default_onnx_opset(self) -> int:
+ # needs to be >= 14 to support tril operator
+ return max(super().default_onnx_opset, 14)
+
+ def generate_dummy_inputs(
+ self,
+ tokenizer: "PreTrainedTokenizerBase",
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ inputs = super().generate_dummy_inputs(
+ preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+ import torch
+
+        # For some reason, replacing this code with
+        # `inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)`
+        # makes the export fail randomly.
+ inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
+ # make every second token global
+ inputs["global_attention_mask"][:, ::2] = 1
+
+ return inputs
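+
+
+# A small illustration (hypothetical sequence length 6) of the dummy global attention
+# pattern produced above, with every second token marked as global:
+#
+#     >>> mask = torch.zeros(1, 6, dtype=torch.int64)
+#     >>> mask[:, ::2] = 1
+#     >>> mask
+#     tensor([[1, 0, 1, 0, 1, 0]])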
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed7d32ab3edbefa8a16307b7bcf35d615c63a66f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py
@@ -0,0 +1,86 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert RoBERTa checkpoint."""
+
+
+import argparse
+
+import pytorch_lightning as pl
+import torch
+from torch import nn
+
+from transformers import LongformerForQuestionAnswering, LongformerModel
+
+
+class LightningModel(pl.LightningModule):
+ def __init__(self, model):
+ super().__init__()
+ self.model = model
+ self.num_labels = 2
+ self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
+
+ # implement only because lightning requires to do so
+ def forward(self):
+ pass
+
+
+def convert_longformer_qa_checkpoint_to_pytorch(
+ longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
+):
+ # load longformer model from model identifier
+ longformer = LongformerModel.from_pretrained(longformer_model)
+ lightning_model = LightningModel(longformer)
+
+ ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
+ lightning_model.load_state_dict(ckpt["state_dict"])
+
+ # init longformer question answering model
+ longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
+
+ # transfer weights
+ longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
+ longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
+ longformer_for_qa.eval()
+
+ # save model
+ longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
+
+ print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--longformer_model",
+ default=None,
+ type=str,
+ required=True,
+ help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
+ )
+ parser.add_argument(
+ "--longformer_question_answering_ckpt_path",
+ default=None,
+ type=str,
+ required=True,
+        help="Path to the official PyTorch Lightning checkpoint.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_longformer_qa_checkpoint_to_pytorch(
+ args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
+ )
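+
+
+# Example invocation (hypothetical paths):
+#
+#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
+#       --longformer_model longformer-base-4096 \
+#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
+#       --pytorch_dump_folder_path ./longformer-qa-pytorch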
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/modeling_longformer.py b/venv/lib/python3.10/site-packages/transformers/models/longformer/modeling_longformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8c7c44ef9918cbf1de35f7564a4ccfa639ef524
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longformer/modeling_longformer.py
@@ -0,0 +1,2327 @@
+# coding=utf-8
+# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Longformer model."""
+
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN, gelu
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_longformer import LongformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096"
+_CONFIG_FOR_DOC = "LongformerConfig"
+
+
+from ..deprecated._archive_maps import LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class LongformerBaseModelOutput(ModelOutput):
+ """
+ Base class for Longformer's outputs, with potential hidden states, local and global attentions.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
+ where `x` is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ global_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class LongformerBaseModelOutputWithPooling(ModelOutput):
+ """
+ Base class for Longformer's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification token) further processed by a
+ Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
+ prediction (classification) objective during pretraining.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
+ where `x` is the number of tokens with global attention mask.
+
+ Global attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ pooler_output: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ global_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class LongformerMaskedLMOutput(ModelOutput):
+ """
+ Base class for masked language model outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Masked language modeling (MLM) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
+ where `x` is the number of tokens with global attention mask.
+
+ Global attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ global_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class LongformerQuestionAnsweringModelOutput(ModelOutput):
+ """
+ Base class for outputs of question answering Longformer models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of the cross-entropy losses for the start and end positions.
+ start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
+ where `x` is the number of tokens with global attention mask.
+
+ Global attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_logits: torch.FloatTensor = None
+ end_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ global_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class LongformerSequenceClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
+ where `x` is the number of tokens with global attention mask.
+
+ Global attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ global_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class LongformerMultipleChoiceModelOutput(ModelOutput):
+ """
+ Base class for outputs of multiple choice Longformer models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
+ where `x` is the number of tokens with global attention mask.
+
+ Global attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ global_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class LongformerTokenClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of token classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
+ where `x` is the number of tokens with global attention mask.
+
+ Global attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+ global_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+def _get_question_end_index(input_ids, sep_token_id):
+ """
+ Computes the index of the first occurrence of `sep_token_id`.
+ """
+
+ sep_token_indices = (input_ids == sep_token_id).nonzero()
+ batch_size = input_ids.shape[0]
+
+ assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions"
+ assert sep_token_indices.shape[0] == 3 * batch_size, (
+ f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You"
+ " might also consider to set `global_attention_mask` manually in the forward function to avoid this error."
+ )
+ return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1]
+
+
+def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True):
+ """
+ Computes the global attention mask by putting attention on all tokens before `sep_token_id` if
+ `before_sep_token` is `True`, else on all tokens after it.
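+
+ A minimal sketch (toy ids; assumes `sep_token_id=2` and the three separator tokens a
+ RoBERTa-style question-answering input carries):
+
+ ```python
+ import torch
+
+ input_ids = torch.tensor([[0, 10, 11, 2, 2, 20, 21, 22, 2]])
+ _compute_global_attention_mask(input_ids, sep_token_id=2)
+ # tensor([[ True,  True,  True, False, False, False, False, False, False]])
+ ```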
+ """
+ question_end_index = _get_question_end_index(input_ids, sep_token_id)
+ question_end_index = question_end_index.unsqueeze(dim=1) # size: batch_size x 1
+ # bool attention mask with True in locations of global attention
+ attention_mask = torch.arange(input_ids.shape[1], device=input_ids.device)
+ if before_sep_token is True:
+ attention_mask = (attention_mask.expand_as(input_ids) < question_end_index).to(torch.bool)
+ else:
+ # the last token is a separator token and should not be counted; in the middle there are two separator tokens
+ attention_mask = (attention_mask.expand_as(input_ids) > (question_end_index + 1)).to(torch.bool) * (
+ attention_mask.expand_as(input_ids) < input_ids.shape[-1]
+ ).to(torch.bool)
+
+ return attention_mask
+
+
+def create_position_ids_from_input_ids(input_ids, padding_idx):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids (`torch.Tensor`): Tensor of input token ids.
+ padding_idx (`int`): Index of the padding token.
+
+ Returns: `torch.Tensor` of position ids.
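+
+ A small illustrative example (assuming `padding_idx=1`, as in RoBERTa-style vocabularies):
+
+ ```python
+ import torch
+
+ input_ids = torch.tensor([[5, 7, 9, 1, 1]])
+ create_position_ids_from_input_ids(input_ids, padding_idx=1)
+ # tensor([[2, 3, 4, 1, 1]])
+ ```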
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
+ return incremental_indices.long() + padding_idx
+
+
+class LongformerEmbeddings(nn.Module):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ self.padding_idx = config.pad_token_id
+ self.position_embeddings = nn.Embedding(
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
+ )
+
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ position_embeddings = self.position_embeddings(position_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which tokens are padded, so we just generate sequential position ids.
+
+ Args:
+ inputs_embeds (`torch.Tensor`): Embedded inputs of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Returns: `torch.Tensor` of sequential position ids.
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
+
+class LongformerSelfAttention(nn.Module):
+ def __init__(self, config, layer_id):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+ self.num_heads = config.num_attention_heads
+ self.head_dim = int(config.hidden_size / config.num_attention_heads)
+ self.embed_dim = config.hidden_size
+
+ self.query = nn.Linear(config.hidden_size, self.embed_dim)
+ self.key = nn.Linear(config.hidden_size, self.embed_dim)
+ self.value = nn.Linear(config.hidden_size, self.embed_dim)
+
+ # separate projection layers for tokens with global attention
+ self.query_global = nn.Linear(config.hidden_size, self.embed_dim)
+ self.key_global = nn.Linear(config.hidden_size, self.embed_dim)
+ self.value_global = nn.Linear(config.hidden_size, self.embed_dim)
+
+ self.dropout = config.attention_probs_dropout_prob
+
+ self.layer_id = layer_id
+ attention_window = config.attention_window[self.layer_id]
+ assert (
+ attention_window % 2 == 0
+ ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
+ assert (
+ attention_window > 0
+ ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
+
+ self.one_sided_attn_window_size = attention_window // 2
+
+ self.config = config
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ layer_head_mask=None,
+ is_index_masked=None,
+ is_index_global_attn=None,
+ is_global_attn=None,
+ output_attentions=False,
+ ):
+ """
+ [`LongformerSelfAttention`] expects *len(hidden_states)* to be a multiple of *attention_window*. Padding to
+ *attention_window* happens in [`LongformerModel.forward`] to avoid redoing the padding on each layer.
+
+ The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to:
+
+ - -10000: no attention
+ - 0: local attention
+ - +10000: global attention
+ """
+ hidden_states = hidden_states.transpose(0, 1)
+
+ # project hidden states
+ query_vectors = self.query(hidden_states)
+ key_vectors = self.key(hidden_states)
+ value_vectors = self.value(hidden_states)
+
+ seq_len, batch_size, embed_dim = hidden_states.size()
+ assert (
+ embed_dim == self.embed_dim
+ ), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"
+
+ # normalize query
+ query_vectors /= math.sqrt(self.head_dim)
+
+ query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
+ key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
+
+ attn_scores = self._sliding_chunks_query_key_matmul(
+ query_vectors, key_vectors, self.one_sided_attn_window_size
+ )
+
+ # values to pad for attention probs
+ remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]
+
+ # cast the bool mask to the query dtype, then fill masked (non-zero) positions with the dtype's minimum value
+ float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(
+ remove_from_windowed_attention_mask, torch.finfo(query_vectors.dtype).min
+ )
+ # diagonal mask with zeros everywhere and the dtype minimum in place of padding
+ diagonal_mask = self._sliding_chunks_query_key_matmul(
+ float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size
+ )
+
+ # pad local attention probs
+ attn_scores += diagonal_mask
+
+ assert list(attn_scores.size()) == [
+ batch_size,
+ seq_len,
+ self.num_heads,
+ self.one_sided_attn_window_size * 2 + 1,
+ ], (
+ f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads},"
+ f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}"
+ )
+
+ # compute local attention probs from global attention keys and concat over window dim
+ if is_global_attn:
+ # compute global attn indices required throughout the forward fn
+ (
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ ) = self._get_global_attn_indices(is_index_global_attn)
+ # calculate global attn probs from global key
+
+ global_key_attn_scores = self._concat_with_global_key_attn_probs(
+ query_vectors=query_vectors,
+ key_vectors=key_vectors,
+ max_num_global_attn_indices=max_num_global_attn_indices,
+ is_index_global_attn_nonzero=is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
+ )
+ # concat to local_attn_probs
+ # (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
+ attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1)
+
+ # free memory
+ del global_key_attn_scores
+
+ attn_probs = nn.functional.softmax(
+ attn_scores, dim=-1, dtype=torch.float32
+ ) # use fp32 for numerical stability
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (
+ self.num_heads,
+ ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
+ attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs
+
+ # softmax sometimes inserts NaN if all positions are masked; replace them with 0
+ attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0)
+ attn_probs = attn_probs.type_as(attn_scores)
+
+ # free memory
+ del attn_scores
+
+ # apply dropout
+ attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
+
+ value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
+
+ # compute local attention output with global attention value and add
+ if is_global_attn:
+ # compute sum of global and local attn
+ attn_output = self._compute_attn_output_with_global_indices(
+ value_vectors=value_vectors,
+ attn_probs=attn_probs,
+ max_num_global_attn_indices=max_num_global_attn_indices,
+ is_index_global_attn_nonzero=is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
+ )
+ else:
+ # compute local attn only
+ attn_output = self._sliding_chunks_matmul_attn_probs_value(
+ attn_probs, value_vectors, self.one_sided_attn_window_size
+ )
+
+ assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size"
+ attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous()
+
+ # compute value for global attention and overwrite to attention output
+ # TODO: remove the redundant computation
+ if is_global_attn:
+ global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(
+ hidden_states=hidden_states,
+ max_num_global_attn_indices=max_num_global_attn_indices,
+ layer_head_mask=layer_head_mask,
+ is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
+ is_index_global_attn_nonzero=is_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
+ is_index_masked=is_index_masked,
+ )
+
+ # get only non zero global attn output
+ nonzero_global_attn_output = global_attn_output[
+ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
+ ]
+
+ # overwrite values with global attention
+ attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(
+ len(is_local_index_global_attn_nonzero[0]), -1
+ )
+ # The attention weights for tokens with global attention are
+ # just filler values, they were never used to compute the output.
+ # Fill with 0 now, the correct values are in 'global_attn_probs'.
+ attn_probs[is_index_global_attn_nonzero] = 0
+
+ outputs = (attn_output.transpose(0, 1),)
+
+ if output_attentions:
+ outputs += (attn_probs,)
+
+ return outputs + (global_attn_probs,) if (is_global_attn and output_attentions) else outputs
+
+ @staticmethod
+ def _pad_and_transpose_last_two_dims(hidden_states_padded, padding):
+ """pads rows and then flips rows and columns"""
+ hidden_states_padded = nn.functional.pad(
+ hidden_states_padded, padding
+ ) # padding value is not important because it will be overwritten
+ hidden_states_padded = hidden_states_padded.view(
+ *hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2)
+ )
+ return hidden_states_padded
+
+ @staticmethod
+ def _pad_and_diagonalize(chunked_hidden_states):
+ """
+ Shift every row 1 step to the right, converting columns into diagonals.
+
+ Example:
+
+ ```python
+ chunked_hidden_states: [ 0.4983,  2.6918, -0.0071,  1.0492,
+                         -1.8348,  0.7672,  0.2986,  0.0285,
+                         -0.7584,  0.4206, -0.0405,  0.1599,
+                          2.0514, -1.1600,  0.5372,  0.2629 ]
+ window_overlap = num_rows = 4
+
+ (pad & diagonalize) =>
+ [ 0.4983,  2.6918, -0.0071,  1.0492,  0.0000,  0.0000,  0.0000,
+   0.0000, -1.8348,  0.7672,  0.2986,  0.0285,  0.0000,  0.0000,
+   0.0000,  0.0000, -0.7584,  0.4206, -0.0405,  0.1599,  0.0000,
+   0.0000,  0.0000,  0.0000,  2.0514, -1.1600,  0.5372,  0.2629 ]
+ ```
+ """
+ total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
+ chunked_hidden_states = nn.functional.pad(
+ chunked_hidden_states, (0, window_overlap + 1)
+ ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
+ chunked_hidden_states = chunked_hidden_states.view(
+ total_num_heads, num_chunks, -1
+ ) # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap
+ chunked_hidden_states = chunked_hidden_states[
+ :, :, :-window_overlap
+ ] # total_num_heads x num_chunks x window_overlap*window_overlap
+ chunked_hidden_states = chunked_hidden_states.view(
+ total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
+ )
+ chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
+ return chunked_hidden_states
+
+ @staticmethod
+ def _chunk(hidden_states, window_overlap, onnx_export: bool = False):
+ """convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
+ if not onnx_export:
+ # non-overlapping chunks of size = 2w
+ hidden_states = hidden_states.view(
+ hidden_states.size(0),
+ torch.div(hidden_states.size(1), (window_overlap * 2), rounding_mode="trunc"),
+ window_overlap * 2,
+ hidden_states.size(2),
+ )
+ # use `as_strided` to make the chunks overlap with an overlap size = window_overlap
+ chunk_size = list(hidden_states.size())
+ chunk_size[1] = chunk_size[1] * 2 - 1
+
+ chunk_stride = list(hidden_states.stride())
+ chunk_stride[1] = chunk_stride[1] // 2
+ return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)
+
+ # When exporting to ONNX, use this separate logic: we have to fall back to the slow implementation,
+ # since `as_strided`, `unfold` and 2d-tensor indexing aren't supported (yet) in ONNX export
+
+ # TODO replace this with
+ # > return hidden_states.unfold(dimension=1, size=window_overlap * 2, step=window_overlap).transpose(2, 3)
+ # once `unfold` is supported
+ # the case hidden_states.size(1) == window_overlap * 2 can also simply return hidden_states.unsqueeze(1), but that's control flow
+
+ chunk_size = [
+ hidden_states.size(0),
+ torch.div(hidden_states.size(1), window_overlap, rounding_mode="trunc") - 1,
+ window_overlap * 2,
+ hidden_states.size(2),
+ ]
+
+ overlapping_chunks = torch.empty(chunk_size, device=hidden_states.device)
+ for chunk in range(chunk_size[1]):
+ overlapping_chunks[:, chunk, :, :] = hidden_states[
+ :, chunk * window_overlap : chunk * window_overlap + 2 * window_overlap, :
+ ]
+ return overlapping_chunks
+
+ @staticmethod
+ def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor:
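+ # Descriptive note: tokens near the sequence start cannot attend to positions before index 0
+ # (and symmetrically at the end), so the corresponding slots of the (2 * window_overlap + 1)-wide
+ # score band are filled with -inf before the softmax.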
+ beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0])
+ beginning_mask = beginning_mask_2d[None, :, None, :]
+ ending_mask = beginning_mask.flip(dims=(1, 3))
+ beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
+ beginning_mask = beginning_mask.expand(beginning_input.size())
+ input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] = torch.full_like(
+ beginning_input, -float("inf")
+ ).where(beginning_mask.bool(), beginning_input)
+ ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :]
+ ending_mask = ending_mask.expand(ending_input.size())
+ input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] = torch.full_like(
+ ending_input, -float("inf")
+ ).where(ending_mask.bool(), ending_input)
+
+ def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
+ """
+ Matrix multiplication of query and key tensors with a sliding window attention pattern. This
+ implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an
+ overlap of size `window_overlap`.
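+
+ Illustrative shape sketch (toy sizes, not part of the implementation): with `seq_len = 8` and
+ `window_overlap = 2`, query/key of shape `(batch_size, 8, num_heads, head_dim)` yield scores of
+ shape `(batch_size, 8, num_heads, 2 * 2 + 1)`; each token scores the 2 positions before it,
+ itself, and the 2 positions after it.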
+ """
+ batch_size, seq_len, num_heads, head_dim = query.size()
+ assert (
+ seq_len % (window_overlap * 2) == 0
+ ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
+ assert query.size() == key.size()
+
+ chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1
+
+ # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
+ query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
+ key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
+
+ query = self._chunk(query, window_overlap, getattr(self.config, "onnx_export", False))
+ key = self._chunk(key, window_overlap, getattr(self.config, "onnx_export", False))
+
+ # matrix multiplication
+ # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
+ # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
+ # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
+ diagonal_chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (query, key)) # multiply
+
+ # convert diagonals into columns
+ diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
+ diagonal_chunked_attention_scores, padding=(0, 0, 0, 1)
+ )
+
+ # allocate space for the overall attention matrix where the chunks are combined. The last dimension
+ # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
+ # window_overlap previous words). The following column is attention score from each word to itself, then
+ # followed by window_overlap columns for the upper triangle.
+
+ diagonal_attention_scores = diagonal_chunked_attention_scores.new_zeros(
+ (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
+ )
+
+ # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
+ # - copying the main diagonal and the upper triangle
+ diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
+ :, :, :window_overlap, : window_overlap + 1
+ ]
+ diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
+ :, -1, window_overlap:, : window_overlap + 1
+ ]
+ # - copying the lower triangle
+ diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
+ :, :, -(window_overlap + 1) : -1, window_overlap + 1 :
+ ]
+
+ diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
+ :, 0, : window_overlap - 1, 1 - window_overlap :
+ ]
+
+ # separate batch_size and num_heads dimensions again
+ diagonal_attention_scores = diagonal_attention_scores.view(
+ batch_size, num_heads, seq_len, 2 * window_overlap + 1
+ ).transpose(2, 1)
+
+ self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
+ return diagonal_attention_scores
+
+ def _sliding_chunks_matmul_attn_probs_value(
+ self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int
+ ):
+ """
+ Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
+ same shape as `attn_probs`
+ """
+ batch_size, seq_len, num_heads, head_dim = value.size()
+
+ assert seq_len % (window_overlap * 2) == 0
+ assert attn_probs.size()[:3] == value.size()[:3]
+ assert attn_probs.size(3) == 2 * window_overlap + 1
+ chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1
+ # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 * window_overlap
+
+ chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
+ batch_size * num_heads,
+ torch.div(seq_len, window_overlap, rounding_mode="trunc"),
+ window_overlap,
+ 2 * window_overlap + 1,
+ )
+
+ # group batch_size and num_heads dimensions into one
+ value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
+
+ # pad seq_len with window_overlap values at the beginning of the sequence and another window_overlap at the end
+ padded_value = nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1)
+
+ # chunk padded_value into chunks of size 3 * window_overlap with an overlap of size window_overlap
+ chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
+ chunked_value_stride = padded_value.stride()
+ chunked_value_stride = (
+ chunked_value_stride[0],
+ window_overlap * chunked_value_stride[1],
+ chunked_value_stride[1],
+ chunked_value_stride[2],
+ )
+ chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)
+
+ chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
+
+ context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
+ return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
+
+ @staticmethod
+ def _get_global_attn_indices(is_index_global_attn):
+ """compute global attn indices required throughout forward pass"""
+ # helper variable
+ num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
+
+ # max number of global attn indices in batch
+ max_num_global_attn_indices = num_global_attn_indices.max()
+
+ # indices of global attn
+ is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)
+
+ # helper variable
+ is_local_index_global_attn = torch.arange(
+ max_num_global_attn_indices, device=is_index_global_attn.device
+ ) < num_global_attn_indices.unsqueeze(dim=-1)
+
+ # location of the non-padding values within global attention indices
+ is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)
+
+ # location of the padding values within global attention indices
+ is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True)
+ return (
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ )
+
+ def _concat_with_global_key_attn_probs(
+ self,
+ key_vectors,
+ query_vectors,
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ ):
+ batch_size = key_vectors.shape[0]
+
+ # create only global key vectors
+ key_vectors_only_global = key_vectors.new_zeros(
+ batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
+ )
+
+ key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]
+
+ # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
+ attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global))
+
+ # need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets
+ attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3)
+ attn_probs_from_global_key[
+ is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :
+ ] = torch.finfo(attn_probs_from_global_key.dtype).min
+ attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3)
+
+ return attn_probs_from_global_key
+
+ def _compute_attn_output_with_global_indices(
+ self,
+ value_vectors,
+ attn_probs,
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ ):
+ batch_size = attn_probs.shape[0]
+
+ # cut local attn probs to global only
+ attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
+ # get value vectors for global only
+ value_vectors_only_global = value_vectors.new_zeros(
+ batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
+ )
+ value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]
+
+ # use `matmul` because `einsum` crashes sometimes with fp16
+ # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
+ # compute attn output only global
+ attn_output_only_global = torch.matmul(
+ attn_probs_only_global.transpose(1, 2).clone(), value_vectors_only_global.transpose(1, 2).clone()
+ ).transpose(1, 2)
+
+ # reshape attn probs
+ attn_probs_without_global = attn_probs.narrow(
+ -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
+ ).contiguous()
+
+ # compute attn output with global
+ attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
+ attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
+ )
+ return attn_output_only_global + attn_output_without_global
+
+ def _compute_global_attn_output_from_hidden(
+ self,
+ hidden_states,
+ max_num_global_attn_indices,
+ layer_head_mask,
+ is_local_index_global_attn_nonzero,
+ is_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ is_index_masked,
+ ):
+ seq_len, batch_size = hidden_states.shape[:2]
+
+ # prepare global hidden states
+ global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)
+ global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[
+ is_index_global_attn_nonzero[::-1]
+ ]
+
+ # global key, query, value
+ global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
+ global_key_vectors = self.key_global(hidden_states)
+ global_value_vectors = self.value_global(hidden_states)
+
+ # normalize
+ global_query_vectors_only_global /= math.sqrt(self.head_dim)
+
+ # reshape
+ global_query_vectors_only_global = (
+ global_query_vectors_only_global.contiguous()
+ .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
+ .transpose(0, 1)
+ ) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
+ global_key_vectors = (
+ global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
+ ) # (batch_size * self.num_heads, seq_len, head_dim)
+ global_value_vectors = (
+ global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
+ ) # (batch_size * self.num_heads, seq_len, head_dim)
+
+ # compute attn scores
+ global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))
+
+ assert list(global_attn_scores.size()) == [
+ batch_size * self.num_heads,
+ max_num_global_attn_indices,
+ seq_len,
+ ], (
+ "global_attn_scores have the wrong size. Size should be"
+ f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is"
+ f" {global_attn_scores.size()}."
+ )
+
+ global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
+
+ # need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets
+ global_attn_scores = global_attn_scores.transpose(1, 2)
+ global_attn_scores[
+ is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, :
+ ] = torch.finfo(global_attn_scores.dtype).min
+ global_attn_scores = global_attn_scores.transpose(1, 2)
+
+ global_attn_scores = global_attn_scores.masked_fill(
+ is_index_masked[:, None, None, :],
+ torch.finfo(global_attn_scores.dtype).min,
+ )
+
+ global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
+
+ # compute global attn probs
+ global_attn_probs_float = nn.functional.softmax(
+ global_attn_scores, dim=-1, dtype=torch.float32
+ ) # use fp32 for numerical stability
+
+ # apply layer head masking
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (
+ self.num_heads,
+ ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
+ global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view(
+ batch_size, self.num_heads, max_num_global_attn_indices, seq_len
+ )
+ global_attn_probs_float = global_attn_probs_float.view(
+ batch_size * self.num_heads, max_num_global_attn_indices, seq_len
+ )
+
+ global_attn_probs = nn.functional.dropout(
+ global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training
+ )
+
+ # global attn output
+ global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)
+
+ assert list(global_attn_output.size()) == [
+ batch_size * self.num_heads,
+ max_num_global_attn_indices,
+ self.head_dim,
+ ], (
+ "global_attn_output tensor has the wrong size. Size should be"
+ f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is"
+ f" {global_attn_output.size()}."
+ )
+
+ global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
+ global_attn_output = global_attn_output.view(
+ batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
+ )
+ return global_attn_output, global_attn_probs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
+class LongformerSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class LongformerAttention(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.self = LongformerSelfAttention(config, layer_id)
+ self.output = LongformerSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ # `LongformerSelfAttention` stores these as `num_heads` / `head_dim`, not the BERT-style attribute names
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_heads, self.self.head_dim, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads (`embed_dim` tracks the input hidden size and is left as-is)
+ self.self.num_heads = self.self.num_heads - len(heads)
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ layer_head_mask=None,
+ is_index_masked=None,
+ is_index_global_attn=None,
+ is_global_attn=None,
+ output_attentions=False,
+ ):
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ is_index_masked=is_index_masked,
+ is_index_global_attn=is_index_global_attn,
+ is_global_attn=is_global_attn,
+ output_attentions=output_attentions,
+ )
+ attn_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attn_output,) + self_outputs[1:]
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate
+class LongformerIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput
+class LongformerOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class LongformerLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.attention = LongformerAttention(config, layer_id)
+ self.intermediate = LongformerIntermediate(config)
+ self.output = LongformerOutput(config)
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ layer_head_mask=None,
+ is_index_masked=None,
+ is_index_global_attn=None,
+ is_global_attn=None,
+ output_attentions=False,
+ ):
+ self_attn_outputs = self.attention(
+ hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ is_index_masked=is_index_masked,
+ is_index_global_attn=is_index_global_attn,
+ is_global_attn=is_global_attn,
+ output_attentions=output_attentions,
+ )
+ attn_output = self_attn_outputs[0]
+ outputs = self_attn_outputs[1:]
+
+ layer_output = apply_chunking_to_forward(
+ self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output
+ )
+ outputs = (layer_output,) + outputs
+ return outputs
+
+ def ff_chunk(self, attn_output):
+ intermediate_output = self.intermediate(attn_output)
+ layer_output = self.output(intermediate_output, attn_output)
+ return layer_output
+
+
+class LongformerEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ padding_len=0,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ is_index_masked = attention_mask < 0
+ is_index_global_attn = attention_mask > 0
+
+ # Record `is_global_attn == True` to enable ONNX export
+ is_global_attn = is_index_global_attn.flatten().any().item()
+
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None # All local attentions.
+ all_global_attentions = () if (output_attentions and is_global_attn) else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ assert head_mask.size()[0] == (
+ len(self.layer)
+ ), f"The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.size()[0]}."
+ for idx, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ is_index_masked,
+ is_index_global_attn,
+ is_global_attn,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
+ is_index_masked=is_index_masked,
+ is_index_global_attn=is_index_global_attn,
+ is_global_attn=is_global_attn,
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ # bsz x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bsz x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
+ all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),)
+
+ if is_global_attn:
+ # bsz x num_attn_heads x num_global_attn x seq_len => bsz x num_attn_heads x seq_len x num_global_attn
+ all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # undo padding if necessary
+ # unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1)
+ hidden_states = hidden_states[:, : hidden_states.shape[1] - padding_len]
+ if output_hidden_states:
+ all_hidden_states = tuple([state[:, : state.shape[1] - padding_len] for state in all_hidden_states])
+
+ if output_attentions:
+ all_attentions = tuple([state[:, :, : state.shape[2] - padding_len, :] for state in all_attentions])
+
+ if not return_dict:
+ return tuple(
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None
+ )
+ return LongformerBaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ global_attentions=all_global_attentions,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler
+class LongformerPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Longformer
+class LongformerLMHead(nn.Module):
+ """Longformer Head for masked language modeling."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ self.decoder.bias = self.bias
+
+ def forward(self, features, **kwargs):
+ x = self.dense(features)
+ x = gelu(x)
+ x = self.layer_norm(x)
+
+ # project back to size of vocabulary with bias
+ x = self.decoder(x)
+
+ return x
+
+ def _tie_weights(self):
+ # To tie those two weights if they get disconnected (on TPU or when the bias is resized)
+ # For accelerate compatibility and to not break backward compatibility
+ if self.decoder.bias.device.type == "meta":
+ self.decoder.bias = self.bias
+ else:
+ self.bias = self.decoder.bias
+
+
+class LongformerPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LongformerConfig
+ base_model_prefix = "longformer"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["LongformerSelfAttention"]
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+LONGFORMER_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`LongformerConfig`]): Model configuration class with all the parameters of the
+ model. Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+LONGFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ global_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+            Mask to decide the attention given on each token, local attention or global attention. Tokens with global
+            attention attend to all other tokens, and all other tokens attend to them. This is important for
+            task-specific finetuning because it makes the model more flexible at representing the task. For example,
+            for classification, the <s> token should be given global attention. For QA, all question tokens should also
+            have global attention. Please refer to the [Longformer paper](https://arxiv.org/abs/2004.05150) for more
+            details. Mask values selected in `[0, 1]`:
+
+ - 0 for local attention (a sliding window attention),
+ - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
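+
+            For example, `global_attention_mask = torch.zeros_like(input_ids)` followed by
+            `global_attention_mask[:, 0] = 1` puts global attention on the first token only; this is what
+            [`LongformerForSequenceClassification`] falls back to when no `global_attention_mask` is passed.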
+
+ head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Longformer Model outputting raw hidden-states without any specific head on top.",
+ LONGFORMER_START_DOCSTRING,
+)
+class LongformerModel(LongformerPreTrainedModel):
+ """
+ This class copied code from [`RobertaModel`] and overwrote standard self-attention with longformer self-attention
+ to provide the ability to process long sequences following the self-attention approach described in [Longformer:
+ the Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan.
+ Longformer self-attention combines a local (sliding window) and global attention to extend to long documents
+ without the O(n^2) increase in memory and compute.
+
+ The self-attention module `LongformerSelfAttention` implemented here supports the combination of local and global
+ attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated
+    attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. A future
+    release will add support for autoregressive attention, but the support for dilated attention requires a custom
+    CUDA kernel to be memory and compute efficient.
+
+ """
+
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ if isinstance(config.attention_window, int):
+ assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
+ assert config.attention_window > 0, "`config.attention_window` has to be positive"
+ config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer
+ else:
+ assert len(config.attention_window) == config.num_hidden_layers, (
+ "`len(config.attention_window)` should equal `config.num_hidden_layers`. "
+ f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
+ )
+
+ self.embeddings = LongformerEmbeddings(config)
+ self.encoder = LongformerEncoder(config)
+ self.pooler = LongformerPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ def _pad_to_window_size(
+ self,
+ input_ids: torch.Tensor,
+ attention_mask: torch.Tensor,
+ token_type_ids: torch.Tensor,
+ position_ids: torch.Tensor,
+ inputs_embeds: torch.Tensor,
+ pad_token_id: int,
+ ):
+ """A helper function to pad tokens and mask to work with implementation of Longformer self-attention."""
+ # padding
+ attention_window = (
+ self.config.attention_window
+ if isinstance(self.config.attention_window, int)
+ else max(self.config.attention_window)
+ )
+
+ assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
+ input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
+ batch_size, seq_len = input_shape[:2]
+
+ padding_len = (attention_window - seq_len % attention_window) % attention_window
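+        # for illustration (hypothetical numbers): with attention_window = 512, seq_len = 1300 gives
+        # padding_len = (512 - 1300 % 512) % 512 = 236, while seq_len = 1024 gives padding_len = 0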
+
+ # this path should be recorded in the ONNX export, it is fine with padding_len == 0 as well
+ if padding_len > 0:
+ logger.warning_once(
+ f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
+ f"`config.attention_window`: {attention_window}"
+ )
+ if input_ids is not None:
+ input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id)
+ if position_ids is not None:
+ # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
+ position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id)
+ if inputs_embeds is not None:
+ input_ids_padding = inputs_embeds.new_full(
+ (batch_size, padding_len),
+ self.config.pad_token_id,
+ dtype=torch.long,
+ )
+ inputs_embeds_padding = self.embeddings(input_ids_padding)
+ inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
+
+ attention_mask = nn.functional.pad(
+ attention_mask, (0, padding_len), value=0
+ ) # no attention on the padding tokens
+ token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0
+
+ return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
+
+ def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
+ # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
+ # (global_attention_mask + 1) => 1 for local attention, 2 for global attention
+        # => final attention_mask => 0 for no attention, 1 for local attention, 2 for global attention
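+        # worked example (hypothetical values): attention_mask = [1, 1, 1, 0] and
+        # global_attention_mask = [1, 0, 0, 0] merge into [2, 1, 1, 0]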
+ if attention_mask is not None:
+ attention_mask = attention_mask * (global_attention_mask + 1)
+ else:
+ # simply use `global_attention_mask` as `attention_mask`
+ # if no `attention_mask` is given
+ attention_mask = global_attention_mask + 1
+ return attention_mask
+
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=LongformerBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ global_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LongformerBaseModelOutputWithPooling]:
+ r"""
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import torch
+ >>> from transformers import LongformerModel, AutoTokenizer
+
+ >>> model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
+ >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
+
+ >>> SAMPLE_TEXT = " ".join(["Hello world! "] * 1000) # long input document
+ >>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1
+
+ >>> attention_mask = torch.ones(
+ ... input_ids.shape, dtype=torch.long, device=input_ids.device
+ ... ) # initialize to local attention
+ >>> global_attention_mask = torch.zeros(
+ ... input_ids.shape, dtype=torch.long, device=input_ids.device
+        ... )  # initialize global attention to be deactivated for all tokens
+ >>> global_attention_mask[
+ ... :,
+ ... [
+ ... 1,
+ ... 4,
+ ... 21,
+ ... ],
+ ... ] = 1 # Set global attention to random tokens for the sake of this example
+ >>> # Usually, set global attention based on the task. For example,
+        >>> # classification: the <s> token
+ >>> # QA: question tokens
+ >>> # LM: potentially on the beginning of sentences and paragraphs
+ >>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)
+ >>> sequence_output = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # merge `global_attention_mask` and `attention_mask`
+ if global_attention_mask is not None:
+ attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
+
+ padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ pad_token_id=self.config.pad_token_id,
+ )
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)[
+ :, 0, 0, :
+ ]
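+        # the `[:, 0, 0, :]` slice collapses the broadcastable 4-D mask returned by
+        # `get_extended_attention_mask` back to shape (batch_size, seq_len), as the Longformer encoder expects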
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
+ )
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ padding_len=padding_len,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return LongformerBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ global_attentions=encoder_outputs.global_attentions,
+ )
+
+
+@add_start_docstrings("""Longformer Model with a `language modeling` head on top.""", LONGFORMER_START_DOCSTRING)
+class LongformerForMaskedLM(LongformerPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.longformer = LongformerModel(config, add_pooling_layer=False)
+ self.lm_head = LongformerLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=LongformerMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ global_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LongformerMaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
+ Used to hide legacy arguments that have been deprecated.
+
+ Returns:
+
+ Mask filling example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LongformerForMaskedLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
+ >>> model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096")
+ ```
+
+ Let's try a very long input.
+
+ ```python
+ >>> TXT = (
+        ...     "My friends are <mask> but they eat too many carbs."
+ ... + " That's why I decide not to eat with them." * 300
+ ... )
+ >>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
+ >>> logits = model(input_ids).logits
+
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
+ >>> probs = logits[0, masked_index].softmax(dim=0)
+ >>> values, predictions = probs.topk(5)
+
+ >>> tokenizer.decode(predictions).split()
+ ['healthy', 'skinny', 'thin', 'good', 'vegetarian']
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.longformer(
+ input_ids,
+ attention_mask=attention_mask,
+ global_attention_mask=global_attention_mask,
+ head_mask=head_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+
+ labels = labels.to(prediction_scores.device)
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return LongformerMaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class LongformerForSequenceClassification(LongformerPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.longformer = LongformerModel(config, add_pooling_layer=False)
+ self.classifier = LongformerClassificationHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="jpwahle/longformer-base-plagiarism-detection",
+ output_type=LongformerSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'ORIGINAL'",
+ expected_loss=5.44,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ global_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LongformerSequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if global_attention_mask is None:
+ logger.warning_once("Initializing global attention on CLS token...")
+ global_attention_mask = torch.zeros_like(input_ids)
+ # global attention on cls token
+ global_attention_mask[:, 0] = 1
+
+ outputs = self.longformer(
+ input_ids,
+ attention_mask=attention_mask,
+ global_attention_mask=global_attention_mask,
+ head_mask=head_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return LongformerSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+
+class LongformerClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
+
+ def forward(self, hidden_states, **kwargs):
+        hidden_states = hidden_states[:, 0, :]  # take <s> token (equiv. to [CLS])
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ output = self.out_proj(hidden_states)
+ return output
+
+
+@add_start_docstrings(
+ """
+    Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD /
+    TriviaQA (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class LongformerForQuestionAnswering(LongformerPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.longformer = LongformerModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=LongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ global_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LongformerQuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LongformerForQuestionAnswering
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")
+ >>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")
+
+ >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
+ >>> encoding = tokenizer(question, text, return_tensors="pt")
+ >>> input_ids = encoding["input_ids"]
+
+ >>> # default is local attention everywhere
+ >>> # the forward method will automatically set global attention on question tokens
+ >>> attention_mask = encoding["attention_mask"]
+
+ >>> outputs = model(input_ids, attention_mask=attention_mask)
+ >>> start_logits = outputs.start_logits
+ >>> end_logits = outputs.end_logits
+ >>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist())
+
+ >>> answer_tokens = all_tokens[torch.argmax(start_logits) : torch.argmax(end_logits) + 1]
+ >>> answer = tokenizer.decode(
+ ... tokenizer.convert_tokens_to_ids(answer_tokens)
+ ... ) # remove space prepending space token
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if global_attention_mask is None:
+ if input_ids is None:
+ logger.warning(
+ "It is not possible to automatically generate the `global_attention_mask` because input_ids is"
+ " None. Please make sure that it is correctly set."
+ )
+ else:
+ # set global attention on question tokens automatically
+ global_attention_mask = _compute_global_attention_mask(input_ids, self.config.sep_token_id)
+
+ outputs = self.longformer(
+ input_ids,
+ attention_mask=attention_mask,
+ global_attention_mask=global_attention_mask,
+ head_mask=head_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
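+        # `start_logits` and `end_logits` each have shape (batch_size, seq_len) after the squeeze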
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, the split adds an extra dimension; squeeze it away
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return LongformerQuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class LongformerForTokenClassification(LongformerPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.longformer = LongformerModel(config, add_pooling_layer=False)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="brad1141/Longformer-finetuned-norm",
+ output_type=LongformerTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=(
+ "['Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence',"
+ " 'Evidence', 'Evidence', 'Evidence', 'Evidence']"
+ ),
+ expected_loss=0.63,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ global_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LongformerTokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.longformer(
+ input_ids,
+ attention_mask=attention_mask,
+ global_attention_mask=global_attention_mask,
+ head_mask=head_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+
+ labels = labels.to(logits.device)
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return LongformerTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class LongformerForMultipleChoice(LongformerPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.longformer = LongformerModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(
+ LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=LongformerMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ global_attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LongformerMultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # set global attention on question tokens
+ if global_attention_mask is None and input_ids is not None:
+ logger.warning_once("Initializing global attention on multiple choice...")
+ # put global attention on all tokens after `config.sep_token_id`
+ global_attention_mask = torch.stack(
+ [
+ _compute_global_attention_mask(input_ids[:, i], self.config.sep_token_id, before_sep_token=False)
+ for i in range(num_choices)
+ ],
+ dim=1,
+ )
+
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ flat_global_attention_mask = (
+ global_attention_mask.view(-1, global_attention_mask.size(-1))
+ if global_attention_mask is not None
+ else None
+ )
+ flat_inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
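+        # the flattened tensors fold the choice dimension into the batch dimension, i.e. they have shape
+        # (batch_size * num_choices, seq_len), so every choice is encoded independently by the model below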
+
+ outputs = self.longformer(
+ flat_input_ids,
+ position_ids=flat_position_ids,
+ token_type_ids=flat_token_type_ids,
+ attention_mask=flat_attention_mask,
+ global_attention_mask=flat_global_attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+
+ labels = labels.to(reshaped_logits.device)
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return LongformerMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/modeling_tf_longformer.py b/venv/lib/python3.10/site-packages/transformers/models/longformer/modeling_tf_longformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..907fbbddf1e68f86fe21223d9ed7a1b4453c525a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longformer/modeling_tf_longformer.py
@@ -0,0 +1,2778 @@
+# coding=utf-8
+# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tensorflow Longformer model."""
+
+
+from __future__ import annotations
+
+import warnings
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_longformer import LongformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096"
+_CONFIG_FOR_DOC = "LongformerConfig"
+
+LARGE_NEGATIVE = -1e8
+
+
+from ..deprecated._archive_maps import TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class TFLongformerBaseModelOutput(ModelOutput):
+ """
+ Base class for Longformer's outputs, with potential hidden states, local and global attentions.
+
+ Args:
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ last_hidden_state: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerBaseModelOutputWithPooling(ModelOutput):
+ """
+ Base class for Longformer's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification token) further processed by a
+ Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
+ prediction (classification) objective during pretraining.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ last_hidden_state: tf.Tensor = None
+ pooler_output: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerMaskedLMOutput(ModelOutput):
+ """
+ Base class for masked language models outputs.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Masked language modeling (MLM) loss.
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerQuestionAnsweringModelOutput(ModelOutput):
+ """
+ Base class for outputs of question answering Longformer models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ start_logits: tf.Tensor = None
+ end_logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerSequenceClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attentions weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerMultipleChoiceModelOutput(ModelOutput):
+ """
+ Base class for outputs of multiple choice models.
+
+ Args:
+ loss (`tf.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`tf.Tensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors. (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFLongformerTokenClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of token classification models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
+ attention_window + 1)`, where `x` is the number of tokens with global attention mask.
+
+ Local attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token in the sequence to every token with
+ global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
+ remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
+ token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
+ (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
+ If the attention window contains a token with global attention, the attention weight at the corresponding
+ index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
+ attention, the attention weights to all other tokens in `attentions` are set to 0; the values should be
+ accessed from `global_attentions`.
+ global_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, where `x`
+ is the number of tokens with global attention mask.
+
+ Global attention weights after the attention softmax, used to compute the weighted average in the
+ self-attention heads. Those are the attention weights from every token with global attention to every token
+ in the sequence.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+ global_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+def _compute_global_attention_mask(input_ids_shape, sep_token_indices, before_sep_token=True):
+ """
+ Computes the global attention mask by putting attention on all tokens before `sep_token_id` if
+ `before_sep_token` is `True`, or on all tokens after `sep_token_id` otherwise.
+ """
+ assert shape_list(sep_token_indices)[1] == 2, "`sep_token_indices` should have two dimensions"
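+ # Note: the reshape below assumes each example contains exactly three separator tokens
+ # (e.g. a RoBERTa-style `<s> question </s></s> context </s>` encoding); the sequence index of the
+ # first separator marks the end of the question span.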
+ question_end_index = tf.reshape(sep_token_indices, (input_ids_shape[0], 3, 2))[:, 0, 1][:, None]
+ # bool attention mask with True in locations of global attention
+ attention_mask = tf.expand_dims(tf.range(input_ids_shape[1], dtype=tf.int64), axis=0)
+ attention_mask = tf.tile(attention_mask, (input_ids_shape[0], 1))
+ if before_sep_token is True:
+ question_end_index = tf.tile(question_end_index, (1, input_ids_shape[1]))
+ attention_mask = tf.cast(attention_mask < question_end_index, dtype=question_end_index.dtype)
+ else:
+ # the last token is a separator token and should not be counted; the middle of the sequence contains two separator tokens
+ question_end_index = tf.tile(question_end_index + 1, (1, input_ids_shape[1]))
+ attention_mask = tf.cast(
+ attention_mask > question_end_index,
+ dtype=question_end_index.dtype,
+ ) * tf.cast(attention_mask < input_ids_shape[-1], dtype=question_end_index.dtype)
+
+ return attention_mask
+
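+ # A minimal usage sketch for `_compute_global_attention_mask` (illustrative only; `config.sep_token_id`
+ # and the surrounding names are assumptions, and each example is assumed to contain exactly three
+ # separator tokens as in question-answering inputs):
+ #
+ #     sep_token_indices = tf.where(input_ids == config.sep_token_id)
+ #     global_attention_mask = _compute_global_attention_mask(
+ #         shape_list(input_ids), sep_token_indices, before_sep_token=True
+ #     )  # 1 for every token before the first separator (the question), 0 elsewhere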
+
+# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead with Roberta->Longformer
+class TFLongformerLMHead(keras.layers.Layer):
+ """Longformer Head for masked language modeling."""
+
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.dense = keras.layers.Dense(
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.act = get_tf_activation("gelu")
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = input_embeddings
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+
+ def get_output_embeddings(self):
+ return self.decoder
+
+ def set_output_embeddings(self, value):
+ self.decoder.weight = value
+ self.decoder.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+
+ # project back to size of vocabulary with bias
+ seq_length = shape_list(tensor=hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
+
+class TFLongformerEmbeddings(keras.layers.Layer):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing and some extra casting.
+ """
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.padding_idx = 1
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.initializer_range = config.initializer_range
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("token_type_embeddings"):
+ self.token_type_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.config.type_vocab_size, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+ def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: tf.Tensor
+ Returns: tf.Tensor
+ """
+ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
+ incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
+
+ return incremental_indices + self.padding_idx
+
+ def call(
+ self,
+ input_ids=None,
+ position_ids=None,
+ token_type_ids=None,
+ inputs_embeds=None,
+ past_key_values_length=0,
+ training=False,
+ ):
+ """
+ Applies embeddings based on the input tensors.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = tf.cast(tf.fill(dims=input_shape, value=0), tf.int64)
+
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(
+ input_ids=input_ids, past_key_values_length=past_key_values_length
+ )
+ else:
+ position_ids = tf.expand_dims(
+ tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1, dtype=tf.int64),
+ axis=0,
+ )
+
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Longformer
+class TFLongformerIntermediate(keras.layers.Layer):
+ def __init__(self, config: LongformerConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Longformer
+class TFLongformerOutput(keras.layers.Layer):
+ def __init__(self, config: LongformerConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Longformer
+class TFLongformerPooler(keras.layers.Layer):
+ def __init__(self, config: LongformerConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(inputs=first_token_tensor)
+
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Longformer
+class TFLongformerSelfOutput(keras.layers.Layer):
+ def __init__(self, config: LongformerConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+class TFLongformerSelfAttention(keras.layers.Layer):
+ def __init__(self, config, layer_id, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads}"
+ )
+
+ self.num_heads = config.num_attention_heads
+ self.head_dim = int(config.hidden_size / config.num_attention_heads)
+ self.embed_dim = config.hidden_size
+ self.query = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="query",
+ )
+ self.key = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="key",
+ )
+ self.value = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="value",
+ )
+
+ # separate projection layers for tokens with global attention
+ self.query_global = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="query_global",
+ )
+ self.key_global = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="key_global",
+ )
+ self.value_global = keras.layers.Dense(
+ self.embed_dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="value_global",
+ )
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
+ self.global_dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
+ self.layer_id = layer_id
+ attention_window = config.attention_window[self.layer_id]
+
+ assert (
+ attention_window % 2 == 0
+ ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
+ assert (
+ attention_window > 0
+ ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
+
+ self.one_sided_attn_window_size = attention_window // 2
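+ # e.g. `attention_window = 512` gives a one-sided window of 256 tokens on each side of a token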
+
+ def build(self, input_shape=None):
+ if not self.built:
+ with tf.name_scope("query_global"):
+ self.query_global.build((self.config.hidden_size,))
+ with tf.name_scope("key_global"):
+ self.key_global.build((self.config.hidden_size,))
+ with tf.name_scope("value_global"):
+ self.value_global.build((self.config.hidden_size,))
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.config.hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.config.hidden_size])
+ if getattr(self, "query_global", None) is not None:
+ with tf.name_scope(self.query_global.name):
+ self.query_global.build([None, None, self.config.hidden_size])
+ if getattr(self, "key_global", None) is not None:
+ with tf.name_scope(self.key_global.name):
+ self.key_global.build([None, None, self.config.hidden_size])
+ if getattr(self, "value_global", None) is not None:
+ with tf.name_scope(self.value_global.name):
+ self.value_global.build([None, None, self.config.hidden_size])
+
+ def call(
+ self,
+ inputs,
+ training=False,
+ ):
+ """
+ LongformerSelfAttention expects *len(hidden_states)* to be a multiple of *attention_window*. Padding to
+ *attention_window* happens in LongformerModel.forward to avoid redoing the padding on each layer.
+
+ The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to:
+
+ - -10000: no attention
+ - 0: local attention
+ - +10000: global attention
+ """
+ # retrieve input args
+ (
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ is_index_masked,
+ is_index_global_attn,
+ is_global_attn,
+ ) = inputs
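+ # `is_index_masked` is True at padding positions (attention_mask < 1) and `is_index_global_attn`
+ # is True at global-attention positions (attention_mask > 1); both are derived in
+ # `TFLongformerMainLayer.call` before the encoder is invoked.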
+
+ # project hidden states
+ query_vectors = self.query(hidden_states)
+ key_vectors = self.key(hidden_states)
+ value_vectors = self.value(hidden_states)
+ batch_size, seq_len, embed_dim = shape_list(hidden_states)
+
+ tf.debugging.assert_equal(
+ embed_dim,
+ self.embed_dim,
+ message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}",
+ )
+
+ # normalize query
+ query_vectors /= tf.math.sqrt(tf.cast(self.head_dim, dtype=query_vectors.dtype))
+ query_vectors = tf.reshape(query_vectors, (batch_size, seq_len, self.num_heads, self.head_dim))
+ key_vectors = tf.reshape(key_vectors, (batch_size, seq_len, self.num_heads, self.head_dim))
+
+ # attn_probs = (batch_size, seq_len, num_heads, window*2+1)
+ attn_scores = self._sliding_chunks_query_key_matmul(
+ query_vectors, key_vectors, self.one_sided_attn_window_size
+ )
+
+ # values to pad for attention probs
+ remove_from_windowed_attention_mask = attention_mask != 0
+ # cast to fp32/fp16 then replace 1's with -inf
+ float_mask = tf.cast(remove_from_windowed_attention_mask, dtype=query_vectors.dtype) * LARGE_NEGATIVE
+
+ # diagonal mask with zeros everywhere and -inf inplace of padding
+ diagonal_mask = self._sliding_chunks_query_key_matmul(
+ tf.ones(shape_list(attention_mask)),
+ float_mask,
+ self.one_sided_attn_window_size,
+ )
+
+ # pad local attention probs
+ attn_scores += diagonal_mask
+
+ tf.debugging.assert_equal(
+ shape_list(attn_scores),
+ [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1],
+ message=(
+ f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads},"
+ f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}"
+ ),
+ )
+
+ # compute global attn indices required through out forward fn
+ (
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ ) = self._get_global_attn_indices(is_index_global_attn)
+
+ # this function is only relevant for global attention
+ if is_global_attn:
+ attn_scores = self._concat_with_global_key_attn_probs(
+ attn_scores=attn_scores,
+ query_vectors=query_vectors,
+ key_vectors=key_vectors,
+ max_num_global_attn_indices=max_num_global_attn_indices,
+ is_index_global_attn_nonzero=is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
+ )
+
+ attn_probs = stable_softmax(attn_scores, axis=-1)
+
+ # softmax sometimes inserts NaN if all positions are masked, replace them with 0
+ # Make sure to create a mask with the proper shape:
+ # if is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1]
+ # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1]
+ if is_global_attn:
+ masked_index = tf.tile(
+ is_index_masked[:, :, None, None],
+ (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1),
+ )
+ else:
+ masked_index = tf.tile(
+ is_index_masked[:, :, None, None],
+ (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + 1),
+ )
+ attn_probs = tf.where(
+ masked_index,
+ tf.zeros(shape_list(masked_index), dtype=attn_probs.dtype),
+ attn_probs,
+ )
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_probs = tf.reshape(layer_head_mask, (1, 1, -1, 1)) * attn_probs
+
+ # apply dropout
+ attn_probs = self.dropout(attn_probs, training=training)
+ value_vectors = tf.reshape(value_vectors, (batch_size, seq_len, self.num_heads, self.head_dim))
+
+ # if global attention, compute sum of global and local attn
+
+ if is_global_attn:
+ attn_output = self._compute_attn_output_with_global_indices(
+ value_vectors=value_vectors,
+ attn_probs=attn_probs,
+ max_num_global_attn_indices=max_num_global_attn_indices,
+ is_index_global_attn_nonzero=is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
+ )
+ else:
+ attn_output = self._sliding_chunks_matmul_attn_probs_value(
+ attn_probs, value_vectors, self.one_sided_attn_window_size
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output), [batch_size, seq_len, self.num_heads, self.head_dim], message="Unexpected size"
+ )
+
+ attn_output = tf.reshape(attn_output, (batch_size, seq_len, embed_dim))
+
+ # compute value for global attention and overwrite to attention output
+ if is_global_attn:
+ attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(
+ attn_output=attn_output,
+ hidden_states=hidden_states,
+ max_num_global_attn_indices=max_num_global_attn_indices,
+ layer_head_mask=layer_head_mask,
+ is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
+ is_index_global_attn_nonzero=is_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
+ is_index_masked=is_index_masked,
+ training=training,
+ )
+ else:
+ # Leave attn_output unchanged
+ global_attn_probs = tf.zeros((batch_size, self.num_heads, max_num_global_attn_indices, seq_len))
+
+ # make sure that local attention probabilities are set to 0 for indices of global attn
+ # Make sure to create a mask with the proper shape:
+ # if is_global_attn==True => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1]
+ # if is_global_attn==False => [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1]
+ if is_global_attn:
+ masked_global_attn_index = tf.tile(
+ is_index_global_attn[:, :, None, None],
+ (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + max_num_global_attn_indices + 1),
+ )
+ else:
+ masked_global_attn_index = tf.tile(
+ is_index_global_attn[:, :, None, None],
+ (1, 1, self.num_heads, self.one_sided_attn_window_size * 2 + 1),
+ )
+ attn_probs = tf.where(
+ masked_global_attn_index,
+ tf.zeros(shape_list(masked_global_attn_index), dtype=attn_probs.dtype),
+ attn_probs,
+ )
+
+ outputs = (attn_output, attn_probs, global_attn_probs)
+
+ return outputs
+
+ def _sliding_chunks_query_key_matmul(self, query, key, window_overlap):
+ """
+ Matrix multiplication of query and key tensors using a sliding window attention pattern. This
+ implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an
+ overlap of size window_overlap.
+ """
+ batch_size, seq_len, num_heads, head_dim = shape_list(query)
+
+ tf.debugging.assert_equal(
+ seq_len % (window_overlap * 2),
+ 0,
+ message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}",
+ )
+ tf.debugging.assert_equal(
+ shape_list(query),
+ shape_list(key),
+ message=(
+ f"Shape of query and key should be equal, but got query: {shape_list(query)} and key:"
+ f" {shape_list(key)}"
+ ),
+ )
+
+ chunks_count = seq_len // window_overlap - 1
+
+ # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
+ query = tf.reshape(
+ tf.transpose(query, (0, 2, 1, 3)),
+ (batch_size * num_heads, seq_len, head_dim),
+ )
+ key = tf.reshape(tf.transpose(key, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim))
+ chunked_query = self._chunk(query, window_overlap)
+ chunked_key = self._chunk(key, window_overlap)
+
+ # matrix multiplication
+ # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
+ # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
+ # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
+ chunked_query = tf.cast(chunked_query, dtype=chunked_key.dtype)
+ chunked_attention_scores = tf.einsum("bcxd,bcyd->bcxy", chunked_query, chunked_key) # multiply
+
+ # convert diagonals into columns
+ paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 1], [0, 0]])
+ diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(chunked_attention_scores, paddings)
+
+ # allocate space for the overall attention matrix where the chunks are combined. The last dimension
+ # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
+ # window_overlap previous words). The following column is attention score from each word to itself, then
+ # followed by window_overlap columns for the upper triangle.
+
+ # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
+ # - copying the main diagonal and the upper triangle
+ # TODO: This code is most likely not very efficient and should be improved
+ diagonal_attn_scores_up_triang = tf.concat(
+ [
+ diagonal_chunked_attention_scores[:, :, :window_overlap, : window_overlap + 1],
+ diagonal_chunked_attention_scores[:, -1:, window_overlap:, : window_overlap + 1],
+ ],
+ axis=1,
+ )
+
+ # - copying the lower triangle
+ diagonal_attn_scores_low_triang = tf.concat(
+ [
+ tf.zeros(
+ (batch_size * num_heads, 1, window_overlap, window_overlap),
+ dtype=diagonal_chunked_attention_scores.dtype,
+ ),
+ diagonal_chunked_attention_scores[:, :, -(window_overlap + 1) : -1, window_overlap + 1 :],
+ ],
+ axis=1,
+ )
+ diagonal_attn_scores_first_chunk = tf.concat(
+ [
+ tf.roll(
+ diagonal_chunked_attention_scores,
+ shift=[1, window_overlap],
+ axis=[2, 3],
+ )[:, :, :window_overlap, :window_overlap],
+ tf.zeros(
+ (batch_size * num_heads, 1, window_overlap, window_overlap),
+ dtype=diagonal_chunked_attention_scores.dtype,
+ ),
+ ],
+ axis=1,
+ )
+ first_chunk_mask = (
+ tf.tile(
+ tf.range(chunks_count + 1, dtype=tf.int64)[None, :, None, None],
+ (batch_size * num_heads, 1, window_overlap, window_overlap),
+ )
+ < 1
+ )
+ diagonal_attn_scores_low_triang = tf.where(
+ first_chunk_mask,
+ diagonal_attn_scores_first_chunk,
+ diagonal_attn_scores_low_triang,
+ )
+
+ # merging upper and lower triangle
+ diagonal_attention_scores = tf.concat(
+ [diagonal_attn_scores_low_triang, diagonal_attn_scores_up_triang], axis=-1
+ )
+
+ # separate batch_size and num_heads dimensions again
+ diagonal_attention_scores = tf.transpose(
+ tf.reshape(
+ diagonal_attention_scores,
+ (batch_size, num_heads, seq_len, 2 * window_overlap + 1),
+ ),
+ (0, 2, 1, 3),
+ )
+
+ diagonal_attention_scores = self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
+
+ return diagonal_attention_scores
+
+ @staticmethod
+ def _mask_invalid_locations(input_tensor, window_overlap):
+ # create correct upper triangle bool mask
+ mask_2d_upper = tf.reverse(
+ tf.linalg.band_part(tf.ones(shape=(window_overlap, window_overlap + 1)), -1, 0),
+ axis=[0],
+ )
+
+ # pad to full matrix
+ padding = tf.convert_to_tensor(
+ [[0, shape_list(input_tensor)[1] - window_overlap], [0, shape_list(input_tensor)[3] - window_overlap - 1]]
+ )
+
+ # create lower mask
+ mask_2d = tf.pad(mask_2d_upper, padding)
+
+ # combine with upper mask
+ mask_2d = mask_2d + tf.reverse(mask_2d, axis=[0, 1])
+
+ # broadcast to full matrix
+ mask_4d = tf.tile(mask_2d[None, :, None, :], (shape_list(input_tensor)[0], 1, 1, 1))
+
+ # inf tensor used for masking
+ inf_tensor = -float("inf") * tf.ones_like(input_tensor)
+
+ # mask
+ input_tensor = tf.where(tf.math.greater(mask_4d, 0), inf_tensor, input_tensor)
+
+ return input_tensor
+
+ def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_overlap):
+ """
+ Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
+ same shape as `attn_probs`
+ """
+
+ batch_size, seq_len, num_heads, head_dim = shape_list(value)
+
+ tf.debugging.assert_equal(
+ seq_len % (window_overlap * 2), 0, message="Seq_len has to be multiple of 2 * window_overlap"
+ )
+ tf.debugging.assert_equal(
+ shape_list(attn_probs)[:3],
+ shape_list(value)[:3],
+ message="value and attn_probs must have same dims (except head_dim)",
+ )
+ tf.debugging.assert_equal(
+ shape_list(attn_probs)[3],
+ 2 * window_overlap + 1,
+ message="attn_probs last dim has to be 2 * window_overlap + 1",
+ )
+
+ chunks_count = seq_len // window_overlap - 1
+
+ # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
+ chunked_attn_probs = tf.reshape(
+ tf.transpose(attn_probs, (0, 2, 1, 3)),
+ (
+ batch_size * num_heads,
+ seq_len // window_overlap,
+ window_overlap,
+ 2 * window_overlap + 1,
+ ),
+ )
+
+ # group batch_size and num_heads dimensions into one
+ value = tf.reshape(
+ tf.transpose(value, (0, 2, 1, 3)),
+ (batch_size * num_heads, seq_len, head_dim),
+ )
+
+ # pad seq_len with w at the beginning of the sequence and another window overlap at the end
+ paddings = tf.convert_to_tensor([[0, 0], [window_overlap, window_overlap], [0, 0]])
+ padded_value = tf.pad(value, paddings, constant_values=-1)
+
+ # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
+ frame_size = 3 * window_overlap * head_dim
+ frame_hop_size = (shape_list(padded_value)[1] * head_dim - frame_size) // chunks_count
+ chunked_value = tf.signal.frame(
+ tf.reshape(padded_value, (batch_size * num_heads, -1)),
+ frame_size,
+ frame_hop_size,
+ )
+ chunked_value = tf.reshape(
+ chunked_value,
+ (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim),
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(chunked_value),
+ [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim],
+ message="Chunked value has the wrong shape",
+ )
+
+ chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
+ context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value)
+ context = tf.transpose(
+ tf.reshape(context, (batch_size, num_heads, seq_len, head_dim)),
+ (0, 2, 1, 3),
+ )
+
+ return context
+
+ @staticmethod
+ def _pad_and_transpose_last_two_dims(hidden_states_padded, paddings):
+ """pads rows and then flips rows and columns"""
+ hidden_states_padded = tf.pad(
+ hidden_states_padded, paddings
+ ) # padding value is not important because it will be overwritten
+ batch_size, chunk_size, seq_length, hidden_dim = shape_list(hidden_states_padded)
+ hidden_states_padded = tf.reshape(hidden_states_padded, (batch_size, chunk_size, hidden_dim, seq_length))
+
+ return hidden_states_padded
+
+ @staticmethod
+ def _pad_and_diagonalize(chunked_hidden_states):
+ """
+ shift every row 1 step right, converting columns into diagonals.
+
+ Example:
+
+ ```python
+ chunked_hidden_states: [
+ 0.4983,
+ 2.6918,
+ -0.0071,
+ 1.0492,
+ -1.8348,
+ 0.7672,
+ 0.2986,
+ 0.0285,
+ -0.7584,
+ 0.4206,
+ -0.0405,
+ 0.1599,
+ 2.0514,
+ -1.1600,
+ 0.5372,
+ 0.2629,
+ ]
+ window_overlap = num_rows = 4
+ ```
+
+ (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
+ 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000
+ 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000
+ 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
+ """
+ total_num_heads, num_chunks, window_overlap, hidden_dim = shape_list(chunked_hidden_states)
+ paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 0], [0, window_overlap + 1]])
+ chunked_hidden_states = tf.pad(
+ chunked_hidden_states, paddings
+ ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
+ chunked_hidden_states = tf.reshape(
+ chunked_hidden_states, (total_num_heads, num_chunks, -1)
+ ) # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap + 1)
+ chunked_hidden_states = chunked_hidden_states[
+ :, :, :-window_overlap
+ ] # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap)
+ chunked_hidden_states = tf.reshape(
+ chunked_hidden_states,
+ (total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim),
+ ) # total_num_heads x num_chunks x window_overlap x (hidden_dim + window_overlap)
+ chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
+
+ return chunked_hidden_states
+
+ @staticmethod
+ def _chunk(hidden_states, window_overlap):
+ """convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
+ batch_size, seq_length, hidden_dim = shape_list(hidden_states)
+ num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1
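+ # e.g. seq_length = 1024 and window_overlap = 256 give num_output_chunks = 3:
+ # the chunks cover positions [0:512], [256:768] and [512:1024]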
+
+ # define frame size and frame stride (similar to convolution)
+ frame_hop_size = window_overlap * hidden_dim
+ frame_size = 2 * frame_hop_size
+ hidden_states = tf.reshape(hidden_states, (batch_size, seq_length * hidden_dim))
+
+ # chunk with overlap
+ chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size)
+
+ tf.debugging.assert_equal(
+ shape_list(chunked_hidden_states),
+ [batch_size, num_output_chunks, frame_size],
+ message=(
+ "Make sure chunking is correctly applied. `Chunked hidden states should have output dimension"
+ f" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}."
+ ),
+ )
+
+ chunked_hidden_states = tf.reshape(
+ chunked_hidden_states,
+ (batch_size, num_output_chunks, 2 * window_overlap, hidden_dim),
+ )
+
+ return chunked_hidden_states
+
+ @staticmethod
+ def _get_global_attn_indices(is_index_global_attn):
+ """compute global attn indices required throughout forward pass"""
+ # helper variable
+ num_global_attn_indices = tf.math.count_nonzero(is_index_global_attn, axis=1)
+ num_global_attn_indices = tf.cast(num_global_attn_indices, dtype=tf.constant(1).dtype)
+
+ # max number of global attn indices in batch
+ max_num_global_attn_indices = tf.reduce_max(num_global_attn_indices)
+
+ # indices of global attn
+ is_index_global_attn_nonzero = tf.where(is_index_global_attn)
+
+ # helper variable
+ is_local_index_global_attn = tf.range(max_num_global_attn_indices) < tf.expand_dims(
+ num_global_attn_indices, axis=-1
+ )
+
+ # location of the non-padding values within global attention indices
+ is_local_index_global_attn_nonzero = tf.where(is_local_index_global_attn)
+
+ # location of the padding values within global attention indices
+ is_local_index_no_global_attn_nonzero = tf.where(tf.math.logical_not(is_local_index_global_attn))
+
+ return (
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ )
+
+ def _concat_with_global_key_attn_probs(
+ self,
+ attn_scores,
+ key_vectors,
+ query_vectors,
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ ):
+ batch_size = shape_list(key_vectors)[0]
+
+ # select global key vectors
+ global_key_vectors = tf.gather_nd(key_vectors, is_index_global_attn_nonzero)
+
+ # create only global key vectors
+ key_vectors_only_global = tf.scatter_nd(
+ is_local_index_global_attn_nonzero,
+ global_key_vectors,
+ shape=(
+ batch_size,
+ max_num_global_attn_indices,
+ self.num_heads,
+ self.head_dim,
+ ),
+ )
+
+ # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
+ attn_probs_from_global_key = tf.einsum("blhd,bshd->blhs", query_vectors, key_vectors_only_global)
+
+ # (batch_size, max_num_global_attn_indices, seq_len, num_heads)
+ attn_probs_from_global_key_trans = tf.transpose(attn_probs_from_global_key, (0, 3, 1, 2))
+ mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple(
+ shape_list(attn_probs_from_global_key_trans)[-2:]
+ )
+ mask = tf.ones(mask_shape) * -10000.0
+ mask = tf.cast(mask, dtype=attn_probs_from_global_key_trans.dtype)
+
+ # scatter mask
+ attn_probs_from_global_key_trans = tf.tensor_scatter_nd_update(
+ attn_probs_from_global_key_trans,
+ is_local_index_no_global_attn_nonzero,
+ mask,
+ )
+
+ # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
+ attn_probs_from_global_key = tf.transpose(attn_probs_from_global_key_trans, (0, 2, 3, 1))
+
+ # concat to attn_probs
+ # (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
+ attn_scores = tf.concat((attn_probs_from_global_key, attn_scores), axis=-1)
+
+ return attn_scores
+
+ def _compute_attn_output_with_global_indices(
+ self,
+ value_vectors,
+ attn_probs,
+ max_num_global_attn_indices,
+ is_index_global_attn_nonzero,
+ is_local_index_global_attn_nonzero,
+ ):
+ batch_size = shape_list(attn_probs)[0]
+
+ # cut local attn probs to global only
+ attn_probs_only_global = attn_probs[:, :, :, :max_num_global_attn_indices]
+
+ # select global value vectors
+ global_value_vectors = tf.gather_nd(value_vectors, is_index_global_attn_nonzero)
+
+ # create only global value vectors
+ value_vectors_only_global = tf.scatter_nd(
+ is_local_index_global_attn_nonzero,
+ global_value_vectors,
+ shape=(
+ batch_size,
+ max_num_global_attn_indices,
+ self.num_heads,
+ self.head_dim,
+ ),
+ )
+
+ # compute attn output only global
+ attn_output_only_global = tf.einsum("blhs,bshd->blhd", attn_probs_only_global, value_vectors_only_global)
+
+ # reshape attn probs
+ attn_probs_without_global = attn_probs[:, :, :, max_num_global_attn_indices:]
+
+ # compute attn output with global
+ attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
+ attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
+ )
+
+ return attn_output_only_global + attn_output_without_global
+
+ def _compute_global_attn_output_from_hidden(
+ self,
+ attn_output,
+ hidden_states,
+ max_num_global_attn_indices,
+ layer_head_mask,
+ is_local_index_global_attn_nonzero,
+ is_index_global_attn_nonzero,
+ is_local_index_no_global_attn_nonzero,
+ is_index_masked,
+ training,
+ ):
+ batch_size, seq_len = shape_list(hidden_states)[:2]
+
+ # prepare global hidden states
+ global_attn_hidden_states = tf.gather_nd(hidden_states, is_index_global_attn_nonzero)
+ global_attn_hidden_states = tf.scatter_nd(
+ is_local_index_global_attn_nonzero,
+ global_attn_hidden_states,
+ shape=(batch_size, max_num_global_attn_indices, self.embed_dim),
+ )
+
+ # global key, query, value
+ global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
+ global_key_vectors = self.key_global(hidden_states)
+ global_value_vectors = self.value_global(hidden_states)
+
+ # normalize
+ global_query_vectors_only_global /= tf.math.sqrt(
+ tf.cast(self.head_dim, dtype=global_query_vectors_only_global.dtype)
+ )
+ global_query_vectors_only_global = self.reshape_and_transpose(global_query_vectors_only_global, batch_size)
+ global_key_vectors = self.reshape_and_transpose(global_key_vectors, batch_size)
+ global_value_vectors = self.reshape_and_transpose(global_value_vectors, batch_size)
+
+ # compute attn scores
+ global_attn_scores = tf.matmul(global_query_vectors_only_global, global_key_vectors, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(global_attn_scores),
+ [batch_size * self.num_heads, max_num_global_attn_indices, seq_len],
+ message=(
+ "global_attn_scores have the wrong size. Size should be"
+ f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is"
+ f" {shape_list(global_attn_scores)}."
+ ),
+ )
+
+ global_attn_scores = tf.reshape(
+ global_attn_scores,
+ (batch_size, self.num_heads, max_num_global_attn_indices, seq_len),
+ )
+ global_attn_scores_trans = tf.transpose(global_attn_scores, (0, 2, 1, 3))
+ mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple(
+ shape_list(global_attn_scores_trans)[-2:]
+ )
+ global_attn_mask = tf.ones(mask_shape) * -10000.0
+ global_attn_mask = tf.cast(global_attn_mask, dtype=global_attn_scores_trans.dtype)
+
+ # scatter mask
+ global_attn_scores_trans = tf.tensor_scatter_nd_update(
+ global_attn_scores_trans,
+ is_local_index_no_global_attn_nonzero,
+ global_attn_mask,
+ )
+ global_attn_scores = tf.transpose(global_attn_scores_trans, (0, 2, 1, 3))
+
+ # mask global attn scores
+ attn_mask = tf.tile(is_index_masked[:, None, None, :], (1, shape_list(global_attn_scores)[1], 1, 1))
+ global_attn_scores = tf.where(attn_mask, -10000.0, global_attn_scores)
+ global_attn_scores = tf.reshape(
+ global_attn_scores,
+ (batch_size * self.num_heads, max_num_global_attn_indices, seq_len),
+ )
+
+ # compute global attn probs
+ global_attn_probs_float = stable_softmax(global_attn_scores, axis=-1)
+
+ # apply layer head masking
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+ global_attn_probs_float = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ global_attn_probs_float, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
+ )
+ global_attn_probs_float = tf.reshape(
+ global_attn_probs_float, (batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
+ )
+
+ # dropout
+ global_attn_probs = self.global_dropout(global_attn_probs_float, training=training)
+
+ # global attn output
+ global_attn_output = tf.matmul(global_attn_probs, global_value_vectors)
+
+ tf.debugging.assert_equal(
+ shape_list(global_attn_output),
+ [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim],
+ message=(
+ "global_attn_output tensor has the wrong size. Size should be"
+ f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is"
+ f" {shape_list(global_attn_output)}."
+ ),
+ )
+
+ global_attn_output = tf.reshape(
+ global_attn_output,
+ (batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim),
+ )
+
+ # get only non zero global attn output
+ nonzero_global_attn_output = tf.gather_nd(
+ tf.transpose(global_attn_output, (0, 2, 1, 3)),
+ is_local_index_global_attn_nonzero,
+ )
+ nonzero_global_attn_output = tf.reshape(
+ nonzero_global_attn_output,
+ (shape_list(is_local_index_global_attn_nonzero)[0], -1),
+ )
+
+ # overwrite values with global attention
+ attn_output = tf.tensor_scatter_nd_update(
+ attn_output, is_index_global_attn_nonzero, nonzero_global_attn_output
+ )
+
+ global_attn_probs = tf.reshape(
+ global_attn_probs, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
+ )
+
+ return attn_output, global_attn_probs
+
+ def reshape_and_transpose(self, vector, batch_size):
+ return tf.reshape(
+ tf.transpose(
+ tf.reshape(vector, (batch_size, -1, self.num_heads, self.head_dim)),
+ (0, 2, 1, 3),
+ ),
+ (batch_size * self.num_heads, -1, self.head_dim),
+ )
+
+
+class TFLongformerAttention(keras.layers.Layer):
+ def __init__(self, config, layer_id=0, **kwargs):
+ super().__init__(**kwargs)
+
+ self.self_attention = TFLongformerSelfAttention(config, layer_id, name="self")
+ self.dense_output = TFLongformerSelfOutput(config, name="output")
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(self, inputs, training=False):
+ (
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ is_index_masked,
+ is_index_global_attn,
+ is_global_attn,
+ ) = inputs
+
+ self_outputs = self.self_attention(
+ [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn],
+ training=training,
+ )
+ attention_output = self.dense_output(self_outputs[0], hidden_states, training=training)
+ outputs = (attention_output,) + self_outputs[1:]
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attention", None) is not None:
+ with tf.name_scope(self.self_attention.name):
+ self.self_attention.build(None)
+ if getattr(self, "dense_output", None) is not None:
+ with tf.name_scope(self.dense_output.name):
+ self.dense_output.build(None)
+
+
+class TFLongformerLayer(keras.layers.Layer):
+ def __init__(self, config, layer_id=0, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFLongformerAttention(config, layer_id, name="attention")
+ self.intermediate = TFLongformerIntermediate(config, name="intermediate")
+ self.longformer_output = TFLongformerOutput(config, name="output")
+
+ def call(self, inputs, training=False):
+ (
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ is_index_masked,
+ is_index_global_attn,
+ is_global_attn,
+ ) = inputs
+
+ attention_outputs = self.attention(
+ [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn],
+ training=training,
+ )
+ attention_output = attention_outputs[0]
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.longformer_output(intermediate_output, attention_output, training=training)
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "longformer_output", None) is not None:
+ with tf.name_scope(self.longformer_output.name):
+ self.longformer_output.build(None)
+
+
+class TFLongformerEncoder(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+ self.layer = [TFLongformerLayer(config, i, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ padding_len=0,
+ is_index_masked=None,
+ is_index_global_attn=None,
+ is_global_attn=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = all_global_attentions = () if output_attentions else None
+
+ for idx, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ hidden_states_to_add = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states
+ all_hidden_states = all_hidden_states + (hidden_states_to_add,)
+
+ layer_outputs = layer_module(
+ [
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ is_index_masked,
+ is_index_global_attn,
+ is_global_attn,
+ ],
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
+ all_attentions = all_attentions + (tf.transpose(layer_outputs[1], (0, 2, 1, 3)),)
+
+ # bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn
+ all_global_attentions = all_global_attentions + (tf.transpose(layer_outputs[2], (0, 1, 3, 2)),)
+
+ # Add last layer
+ if output_hidden_states:
+ hidden_states_to_add = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states
+ all_hidden_states = all_hidden_states + (hidden_states_to_add,)
+
+ # undo padding
+ # unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1)
+ hidden_states = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states
+ if output_attentions:
+ all_attentions = (
+ tuple([state[:, :, :-padding_len, :] for state in all_attentions])
+ if padding_len > 0
+ else all_attentions
+ )
+
+ if not return_dict:
+ return tuple(
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None
+ )
+
+ return TFLongformerBaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ global_attentions=all_global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFLongformerMainLayer(keras.layers.Layer):
+ config_class = LongformerConfig
+
+ def __init__(self, config, add_pooling_layer=True, **kwargs):
+ super().__init__(**kwargs)
+
+ if isinstance(config.attention_window, int):
+ assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
+ assert config.attention_window > 0, "`config.attention_window` has to be positive"
+ config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer
+ else:
+ assert len(config.attention_window) == config.num_hidden_layers, (
+ "`len(config.attention_window)` should equal `config.num_hidden_layers`. "
+ f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
+ )
+
+ self.config = config
+ self.num_hidden_layers = config.num_hidden_layers
+ self.initializer_range = config.initializer_range
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+ self.pad_token_id = config.pad_token_id
+ self.attention_window = config.attention_window
+ self.embeddings = TFLongformerEmbeddings(config, name="embeddings")
+ self.encoder = TFLongformerEncoder(config, name="encoder")
+ self.pooler = TFLongformerPooler(config, name="pooler") if add_pooling_layer else None
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ head_mask=None,
+ global_attention_mask=None,
+ token_type_ids=None,
+ position_ids=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and not isinstance(input_ids, tf.Tensor):
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64)
+ elif input_ids is not None:
+ input_ids = tf.cast(input_ids, tf.int64)
+
+ if attention_mask is not None and not isinstance(attention_mask, tf.Tensor):
+ attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64)
+ elif attention_mask is not None:
+ attention_mask = tf.cast(attention_mask, tf.int64)
+
+ if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor):
+ global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64)
+ elif global_attention_mask is not None:
+ global_attention_mask = tf.cast(global_attention_mask, tf.int64)
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.cast(tf.fill(input_shape, 1), tf.int64)
+
+ if token_type_ids is None:
+ token_type_ids = tf.cast(tf.fill(input_shape, 0), tf.int64)
+
+ # merge `global_attention_mask` and `attention_mask`
+ if global_attention_mask is not None:
+ attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
+
+ (
+ padding_len,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ inputs_embeds,
+ ) = self._pad_to_window_size(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ pad_token_id=self.pad_token_id,
+ )
+
+ # is index masked or global attention
+ is_index_masked = tf.math.less(attention_mask, 1)
+ is_index_global_attn = tf.math.greater(attention_mask, 1)
+ is_global_attn = tf.math.reduce_any(is_index_global_attn)
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, to_seq_length, 1, 1]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+        # this attention mask is simpler than the triangular masking of causal attention
+        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+ attention_mask_shape = shape_list(attention_mask)
+ extended_attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], attention_mask_shape[1], 1, 1))
+
+ # Since attention_mask is 1.0 for positions we want to attend locally and 0.0 for
+ # masked and global attn positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = tf.cast(tf.math.abs(1 - extended_attention_mask), tf.dtypes.float32) * -10000.0
+ embedding_output = self.embeddings(
+ input_ids,
+ position_ids,
+ token_type_ids,
+ inputs_embeds,
+ training=training,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ padding_len=padding_len,
+ is_index_masked=is_index_masked,
+ is_index_global_attn=is_index_global_attn,
+ is_global_attn=is_global_attn,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (
+ sequence_output,
+ pooled_output,
+ ) + encoder_outputs[1:]
+
+ return TFLongformerBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ global_attentions=encoder_outputs.global_attentions,
+ )
+
+ def _pad_to_window_size(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ inputs_embeds,
+ pad_token_id,
+ ):
+        """A helper function to pad tokens and mask to work with the implementation of Longformer self-attention."""
+ # padding
+ attention_window = (
+ self.attention_window if isinstance(self.attention_window, int) else max(self.attention_window)
+ )
+
+ assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
+
+ input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)
+ batch_size, seq_len = input_shape[:2]
+ padding_len = (attention_window - seq_len % attention_window) % attention_window
+
+ paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]])
+
+ if input_ids is not None:
+ input_ids = tf.pad(input_ids, paddings, constant_values=pad_token_id)
+
+ if position_ids is not None:
+ # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
+ position_ids = tf.pad(position_ids, paddings, constant_values=pad_token_id)
+
+ if inputs_embeds is not None:
+ if padding_len > 0:
+ input_ids_padding = tf.cast(tf.fill((batch_size, padding_len), self.pad_token_id), tf.int64)
+ inputs_embeds_padding = self.embeddings(input_ids_padding)
+ inputs_embeds = tf.concat([inputs_embeds, inputs_embeds_padding], axis=-2)
+
+ attention_mask = tf.pad(attention_mask, paddings, constant_values=False) # no attention on the padding tokens
+ token_type_ids = tf.pad(token_type_ids, paddings, constant_values=0) # pad with token_type_id = 0
+
+ return (
+ padding_len,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ position_ids,
+ inputs_embeds,
+ )
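+        # Illustrative sketch (not part of the original file): the padding arithmetic
+        # above rounds the sequence length up to a multiple of the attention window.
+        # For example, with attention_window = 512 and seq_len = 1000,
+        # padding_len = (512 - 1000 % 512) % 512 = 24, so the padded length is 1024.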
+
+ @staticmethod
+ def _merge_to_attention_mask(attention_mask: tf.Tensor, global_attention_mask: tf.Tensor):
+ # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
+ # (global_attention_mask + 1) => 1 for local attention, 2 for global attention
+ # => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention
+ if attention_mask is not None:
+ attention_mask = attention_mask * (global_attention_mask + 1)
+ else:
+ # simply use `global_attention_mask` as `attention_mask`
+ # if no `attention_mask` is given
+ attention_mask = global_attention_mask + 1
+
+ return attention_mask
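+        # Illustrative sketch (not part of the original file): with
+        # attention_mask = [1, 1, 1, 0] (last position is padding) and
+        # global_attention_mask = [1, 0, 0, 0] (global attention on the first token),
+        # the merged mask is [1, 1, 1, 0] * [2, 1, 1, 1] = [2, 1, 1, 0], i.e.
+        # 0 = no attention, 1 = local (sliding-window) attention, 2 = global attention.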
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+
+
+class TFLongformerPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LongformerConfig
+ base_model_prefix = "longformer"
+
+ @property
+ def input_signature(self):
+ sig = super().input_signature
+ sig["global_attention_mask"] = tf.TensorSpec((None, None), tf.int32, name="global_attention_mask")
+ return sig
+
+
+LONGFORMER_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`LongformerConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+LONGFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ global_attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+            Mask to decide the attention given on each token, local attention or global attention. Tokens with global
+            attention attend to all other tokens, and all other tokens attend to them. This is important for
+            task-specific finetuning because it makes the model more flexible at representing the task. For example,
+            for classification, the <s> token should be given global attention. For QA, all question tokens should also
+ have global attention. Please refer to the [Longformer paper](https://arxiv.org/abs/2004.05150) for more
+ details. Mask values selected in `[0, 1]`:
+
+ - 0 for local attention (a sliding window attention),
+ - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
+
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare Longformer Model outputting raw hidden-states without any specific head on top.",
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerModel(TFLongformerPreTrainedModel):
+ """
+
+ This class copies code from [`TFRobertaModel`] and overwrites standard self-attention with longformer
+ self-attention to provide the ability to process long sequences following the self-attention approach described in
+ [Longformer: the Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, and
+ Arman Cohan. Longformer self-attention combines a local (sliding window) and global attention to extend to long
+ documents without the O(n^2) increase in memory and compute.
+
+ The self-attention module `TFLongformerSelfAttention` implemented here supports the combination of local and global
+ attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated
+ attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future
+ release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA
+ kernel to be memory and compute efficient.
+
+ """
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.longformer = TFLongformerMainLayer(config, name="longformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+
+
+@add_start_docstrings(
+ """Longformer Model with a `language modeling` head on top.""",
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForMaskedLM(TFLongformerPreTrainedModel, TFMaskedLanguageModelingLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer")
+ self.lm_head = TFLongformerLMHead(config, self.longformer.embeddings, name="lm_head")
+
+ def get_lm_head(self):
+ return self.lm_head
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.lm_head.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="allenai/longformer-base-4096",
+ output_type=TFLongformerMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+        mask="<mask>",
+ expected_output="' Paris'",
+ expected_loss=0.44,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ """
+
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output, training=training)
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build(None)
+
+
+@add_start_docstrings(
+ """
+ Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD /
+ TriviaQA (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForQuestionAnswering(TFLongformerPreTrainedModel, TFQuestionAnsweringLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer")
+ self.qa_outputs = keras.layers.Dense(
+ config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="qa_outputs",
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="allenai/longformer-large-4096-finetuned-triviaqa",
+ output_type=TFLongformerQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="' puppet'",
+ expected_loss=0.96,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence
+ are not taken into account for computing the loss.
+ """
+
+ if input_ids is not None and not isinstance(input_ids, tf.Tensor):
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64)
+ elif input_ids is not None:
+ input_ids = tf.cast(input_ids, tf.int64)
+
+ if attention_mask is not None and not isinstance(attention_mask, tf.Tensor):
+ attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64)
+ elif attention_mask is not None:
+ attention_mask = tf.cast(attention_mask, tf.int64)
+
+ if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor):
+ global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64)
+ elif global_attention_mask is not None:
+ global_attention_mask = tf.cast(global_attention_mask, tf.int64)
+
+ # set global attention on question tokens
+ if global_attention_mask is None and input_ids is not None:
+ if shape_list(tf.where(input_ids == self.config.sep_token_id))[0] != 3 * shape_list(input_ids)[0]:
+ logger.warning(
+ f"There should be exactly three separator tokens: {self.config.sep_token_id} in every sample for"
+                    " question answering. You might also consider setting `global_attention_mask` manually in the"
+ " forward function to avoid this. This is most likely an error. The global attention is disabled"
+ " for this forward pass."
+ )
+ global_attention_mask = tf.cast(tf.fill(shape_list(input_ids), value=0), tf.int64)
+ else:
+ logger.warning_once("Initializing global attention on question tokens...")
+ # put global attention on all tokens until `config.sep_token_id` is reached
+ sep_token_indices = tf.where(input_ids == self.config.sep_token_id)
+ sep_token_indices = tf.cast(sep_token_indices, dtype=tf.int64)
+ global_attention_mask = _compute_global_attention_mask(shape_list(input_ids), sep_token_indices)
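+                # Illustrative sketch (not part of the original file): for an input laid
+                # out as "<s> question </s></s> context </s>", the helper above marks the
+                # question tokens (everything before the first "</s>") with 1, i.e.
+                # global attention on the question and local attention on the context.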
+
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+ loss = None
+
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
+
+
+class TFLongformerClassificationHead(keras.layers.Layer):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.out_proj = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
+ )
+ self.config = config
+
+ def call(self, hidden_states, training=False):
+        hidden_states = hidden_states[:, 0, :]  # take <s> token (equiv. to [CLS])
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ output = self.out_proj(hidden_states)
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForSequenceClassification(TFLongformerPreTrainedModel, TFSequenceClassificationLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer")
+ self.classifier = TFLongformerClassificationHead(config, name="classifier")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFLongformerSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ if input_ids is not None and not isinstance(input_ids, tf.Tensor):
+ input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64)
+ elif input_ids is not None:
+ input_ids = tf.cast(input_ids, tf.int64)
+
+ if attention_mask is not None and not isinstance(attention_mask, tf.Tensor):
+ attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64)
+ elif attention_mask is not None:
+ attention_mask = tf.cast(attention_mask, tf.int64)
+
+ if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor):
+ global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64)
+ elif global_attention_mask is not None:
+ global_attention_mask = tf.cast(global_attention_mask, tf.int64)
+
+ if global_attention_mask is None and input_ids is not None:
+ logger.warning_once("Initializing global attention on CLS token...")
+ # global attention on cls token
+ global_attention_mask = tf.zeros_like(input_ids)
+ updates = tf.ones(shape_list(input_ids)[0], dtype=tf.int64)
+ indices = tf.pad(
+ tensor=tf.expand_dims(tf.range(shape_list(input_ids)[0], dtype=tf.int64), axis=1),
+ paddings=[[0, 0], [0, 1]],
+ constant_values=0,
+ )
+ global_attention_mask = tf.tensor_scatter_nd_update(
+ global_attention_mask,
+ indices,
+ updates,
+ )
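+            # Illustrative sketch (not part of the original file): for a batch of two
+            # sequences, `indices` is [[0, 0], [1, 0]] and `updates` is [1, 1], so the
+            # scatter writes a 1 into column 0 of every row, i.e. global attention on
+            # the CLS token only and local attention everywhere else.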
+
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build(None)
+
+
+@add_start_docstrings(
+ """
+ Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForMultipleChoice(TFLongformerPreTrainedModel, TFMultipleChoiceLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.longformer = TFLongformerMainLayer(config, name="longformer")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @property
+ def input_signature(self):
+ return {
+ "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
+ "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
+ "global_attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="global_attention_mask"),
+ }
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(
+ LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFLongformerMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
+ """
+
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
+ flat_global_attention_mask = (
+ tf.reshape(global_attention_mask, (-1, shape_list(global_attention_mask)[-1]))
+ if global_attention_mask is not None
+ else None
+ )
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.longformer(
+ flat_input_ids,
+ position_ids=flat_position_ids,
+ token_type_ids=flat_token_type_ids,
+ attention_mask=flat_attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=flat_global_attention_mask,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
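+        # Illustrative sketch (not part of the original file): with batch_size = 2 and
+        # num_choices = 4, the flattened forward pass yields `logits` of shape (8, 1),
+        # which is reshaped here to (2, 4) so each row holds one example's choice scores.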
+
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ LONGFORMER_START_DOCSTRING,
+)
+class TFLongformerForTokenClassification(TFLongformerPreTrainedModel, TFTokenClassificationLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+ _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.longformer = TFLongformerMainLayer(config=config, add_pooling_layer=False, name="longformer")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFLongformerTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ global_attention_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[Union[np.array, tf.Tensor]] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFLongformerTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+
+ outputs = self.longformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ global_attention_mask=global_attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFLongformerTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ global_attentions=outputs.global_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "longformer", None) is not None:
+ with tf.name_scope(self.longformer.name):
+ self.longformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer.py b/venv/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..51728d778081580a89ab067577439dfa3e46a6df
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer.py
@@ -0,0 +1,399 @@
+# coding=utf-8
+# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+from functools import lru_cache
+from typing import List, Optional, Tuple
+
+import regex as re
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
+
+
+@lru_cache()
+# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
+def bytes_to_unicode():
+ """
+    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
+    characters that the bpe code barfs on.
+
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
+ tables between utf-8 bytes and unicode strings.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
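+    # Illustrative sketch (not part of the original file): printable bytes map to
+    # themselves while the remaining bytes are shifted above 255, e.g.
+    #     bytes_to_unicode()[ord("A")] == "A"
+    #     bytes_to_unicode()[ord(" ")] == "Ġ"  # space becomes a visible marker character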
+
+
+# Copied from transformers.models.roberta.tokenization_roberta.get_pairs
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word.
+
+ Word is represented as tuple of symbols (symbols being variable-length strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
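+    # Illustrative sketch (not part of the original file):
+    #     get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}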
+
+
+# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer with FacebookAI/roberta-base->allenai/longformer-base-4096, RoBERTa->Longformer all-casing, RobertaTokenizer->LongformerTokenizer
+class LongformerTokenizer(PreTrainedTokenizer):
+ """
+ Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
+
+    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
+    be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
+
+ ```python
+ >>> from transformers import LongformerTokenizer
+
+ >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
+ >>> tokenizer("Hello world")["input_ids"]
+ [0, 31414, 232, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [0, 20920, 232, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+
+
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
+
+
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+        sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+        cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+        mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an initial space to the input. This makes it possible to treat the leading word just
+            like any other word. (The Longformer tokenizer detects the beginning of words by the preceding space.)
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ errors="replace",
+        bos_token="<s>",
+        eos_token="</s>",
+        sep_token="</s>",
+        cls_token="<s>",
+        unk_token="<unk>",
+        pad_token="<pad>",
+        mask_token="<mask>",
+ add_prefix_space=False,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+
+ # these special tokens are not part of the vocab.json, let's add them in the correct order
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+ super().__init__(
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ def get_vocab(self):
+ vocab = dict(self.encoder).copy()
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
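+        # Illustrative sketch (not part of the original file): assuming the merges file
+        # ranks ("l", "o") highest and also contains ("lo", "w"), bpe("low") proceeds
+        #     ("l", "o", "w") -> ("lo", "w") -> ("low",)
+        # and returns "low"; tokens with no applicable merge keep their characters
+        # separated by spaces, e.g. "l o w".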
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+ return bpe_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. A Longformer sequence has the following format:
+
+        - single sequence: `<s> X </s>`
+        - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
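+        # Illustrative sketch (not part of the original file): with the standard
+        # allenai/longformer-base-4096 vocabulary (cls_token_id == 0, sep_token_id == 2),
+        # the ids for "Hello world" ([31414, 232]) become [0, 31414, 232, 2], matching
+        # the example in the class docstring; a pair gets a double separator in between.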
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
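+        # Illustrative sketch (not part of the original file):
+        #     get_special_tokens_mask([31414, 232]) == [1, 0, 0, 1]
+        #     get_special_tokens_mask([31414], [232]) == [1, 0, 1, 1, 0, 1]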
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
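+        # Illustrative sketch (not part of the original file): Longformer does not use
+        # token type ids, so the returned mask is all zeros, e.g.
+        #     create_token_type_ids_from_sequences([31414, 232]) == [0, 0, 0, 0]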
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
+ text = " " + text
+ return (text, kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer_fast.py b/venv/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..02b74818a23ef813c5a03e1e2eba7585cb3b46b7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/longformer/tokenization_longformer_fast.py
@@ -0,0 +1,269 @@
+# coding=utf-8
+# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Tokenization classes for Longformer."""
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import pre_tokenizers, processors
+
+from ...tokenization_utils_base import AddedToken, BatchEncoding
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_longformer import LongformerTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
+
+
+# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast with FacebookAI/roberta-base->allenai/longformer-base-4096, RoBERTa->Longformer all-casing, Roberta->Longformer
+class LongformerTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" Longformer tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
+ tokenizer, using byte-level Byte-Pair-Encoding.
+
+    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
+    be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
+
+ ```python
+ >>> from transformers import LongformerTokenizerFast
+
+ >>> tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
+ >>> tokenizer("Hello world")["input_ids"]
+ [0, 31414, 232, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [0, 20920, 232, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+
+
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
+
+
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+        sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows treating the leading word just as any
+ other word. (The Longformer tokenizer detects the beginning of words by the preceding space.)
+ trim_offsets (`bool`, *optional*, defaults to `True`):
+ Whether the post processing step should trim offsets to avoid including whitespaces.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = LongformerTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+ errors="replace",
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ add_prefix_space=False,
+ trim_offsets=True,
+ **kwargs,
+ ):
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+ super().__init__(
+ vocab_file,
+ merges_file,
+ tokenizer_file=tokenizer_file,
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ trim_offsets=trim_offsets,
+ **kwargs,
+ )
+
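+ # The serialized pre-tokenizer state is a JSON string, roughly of the form
+ # '{"type": "ByteLevel", "add_prefix_space": false, ...}' (keys shown for
+ # illustration); the block below rebuilds the pre-tokenizer only when the
+ # stored `add_prefix_space` flag disagrees with the requested one.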
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
+ pre_tok_state["add_prefix_space"] = add_prefix_space
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
+
+ self.add_prefix_space = add_prefix_space
+
+ tokenizer_component = "post_processor"
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
+ if tokenizer_component_instance:
+ state = json.loads(tokenizer_component_instance.__getstate__())
+
+ # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
+ if "sep" in state:
+ state["sep"] = tuple(state["sep"])
+ if "cls" in state:
+ state["cls"] = tuple(state["cls"])
+
+ changes_to_apply = False
+
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ state["add_prefix_space"] = add_prefix_space
+ changes_to_apply = True
+
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
+ state["trim_offsets"] = trim_offsets
+ changes_to_apply = True
+
+ if changes_to_apply:
+ component_class = getattr(processors, state.pop("type"))
+ new_value = component_class(**state)
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
+
+ @property
+ def mask_token(self) -> str:
+ """
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
+ having been set.
+
+ Longformer tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
+ comprise the space before the *<mask>*.
+ """
+ if self._mask_token is None:
+ if self.verbose:
+ logger.error("Using mask_token, but it is not set yet.")
+ return None
+ return str(self._mask_token)
+
+ @mask_token.setter
+ def mask_token(self, value):
+ """
+ Overriding the default behavior of the mask token to have it eat the space before it.
+
+ This is needed to preserve backward compatibility with all the previously used models based on Longformer.
+ """
+ # The mask token behaves like a normal word, i.e. it includes the space before it,
+ # so we set lstrip to True
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
+ self._mask_token = value
+
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._batch_encode_plus(*args, **kwargs)
+
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._encode_plus(*args, **kwargs)
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
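+ # Resulting format (Longformer follows the RoBERTa convention, where
+ # bos/cls is `<s>` and eos/sep is `</s>`):
+ # single sequence: <s> token_ids_0 </s>
+ # pair of sequences: <s> token_ids_0 </s></s> token_ids_1 </s>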
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
+ if token_ids_1 is None:
+ return output
+
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
diff --git a/venv/lib/python3.10/site-packages/transformers/models/markuplm/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/markuplm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8df88ce16f683bce947839ab1dbf5b4b1325ee1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/markuplm/__init__.py
@@ -0,0 +1,83 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_markuplm": ["MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "MarkupLMConfig"],
+ "feature_extraction_markuplm": ["MarkupLMFeatureExtractor"],
+ "processing_markuplm": ["MarkupLMProcessor"],
+ "tokenization_markuplm": ["MarkupLMTokenizer"],
+}
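+# `_import_structure` maps each submodule to its public symbols; the
+# `_LazyModule` installed at the bottom of this file resolves them on first
+# attribute access, so importing this package stays cheap until a symbol
+# (and its torch/tokenizers dependency) is actually used.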
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_markuplm_fast"] = ["MarkupLMTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_markuplm"] = [
+ "MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MarkupLMForQuestionAnswering",
+ "MarkupLMForSequenceClassification",
+ "MarkupLMForTokenClassification",
+ "MarkupLMModel",
+ "MarkupLMPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_markuplm import MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP, MarkupLMConfig
+ from .feature_extraction_markuplm import MarkupLMFeatureExtractor
+ from .processing_markuplm import MarkupLMProcessor
+ from .tokenization_markuplm import MarkupLMTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_markuplm_fast import MarkupLMTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_markuplm import (
+ MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST,
+ MarkupLMForQuestionAnswering,
+ MarkupLMForSequenceClassification,
+ MarkupLMForTokenClassification,
+ MarkupLMModel,
+ MarkupLMPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/configuration_markuplm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/configuration_markuplm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6dfb8114e8831ce7cd165609bd32b8e712579408
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/markuplm/__pycache__/configuration_markuplm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/markuplm/configuration_markuplm.py b/venv/lib/python3.10/site-packages/transformers/models/markuplm/configuration_markuplm.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeb80ae51f96baecf7e84276af9839559e49d596
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/markuplm/configuration_markuplm.py
@@ -0,0 +1,156 @@
+# coding=utf-8
+# Copyright 2021, The Microsoft Research Asia MarkupLM Team authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MarkupLM model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class MarkupLMConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MarkupLMModel`]. It is used to instantiate a
+ MarkupLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the MarkupLM
+ [microsoft/markuplm-base](https://huggingface.co/microsoft/markuplm-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the MarkupLM model. Defines the number of different tokens that can be represented by
+ the *inputs_ids* passed to the forward method of [`MarkupLMModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed into [`MarkupLMModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ max_tree_id_unit_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum value that the tree id unit embedding might ever use. Typically set this to something large
+ just in case (e.g., 1024).
+ max_xpath_tag_unit_embeddings (`int`, *optional*, defaults to 256):
+ The maximum value that the xpath tag unit embedding might ever use. Typically set this to something large
+ just in case (e.g., 256).
+ max_xpath_subs_unit_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum value that the xpath subscript unit embedding might ever use. Typically set this to something
+ large just in case (e.g., 1024).
+ tag_pad_id (`int`, *optional*, defaults to 216):
+ The id of the padding token in the xpath tags.
+ subs_pad_id (`int`, *optional*, defaults to 1001):
+ The id of the padding token in the xpath subscripts.
+ xpath_unit_hidden_size (`int`, *optional*, defaults to 32):
+ The hidden size of each tree id unit. One complete tree index will have
+ (50*xpath_unit_hidden_size)-dim.
+ max_depth (`int`, *optional*, defaults to 50):
+ The maximum depth in xpath.
+
+ Examples:
+
+ ```python
+ >>> from transformers import MarkupLMModel, MarkupLMConfig
+
+ >>> # Initializing a MarkupLM microsoft/markuplm-base style configuration
+ >>> configuration = MarkupLMConfig()
+
+ >>> # Initializing a model from the microsoft/markuplm-base style configuration
+ >>> model = MarkupLMModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "markuplm"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ bos_token_id=0,
+ eos_token_id=2,
+ max_xpath_tag_unit_embeddings=256,
+ max_xpath_subs_unit_embeddings=1024,
+ tag_pad_id=216,
+ subs_pad_id=1001,
+ xpath_unit_hidden_size=32,
+ max_depth=50,
+ position_embedding_type="absolute",
+ use_cache=True,
+ classifier_dropout=None,
+ **kwargs,
+ ):
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ **kwargs,
+ )
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+ self.classifier_dropout = classifier_dropout
+ # additional properties
+ self.max_depth = max_depth
+ self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
+ self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
+ self.tag_pad_id = tag_pad_id
+ self.subs_pad_id = subs_pad_id
+ self.xpath_unit_hidden_size = xpath_unit_hidden_size
diff --git a/venv/lib/python3.10/site-packages/transformers/models/markuplm/feature_extraction_markuplm.py b/venv/lib/python3.10/site-packages/transformers/models/markuplm/feature_extraction_markuplm.py
new file mode 100644
index 0000000000000000000000000000000000000000..73c16bad302b54d6456e3be7e16c825c4d03b6ad
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/markuplm/feature_extraction_markuplm.py
@@ -0,0 +1,183 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Feature extractor class for MarkupLM.
+"""
+
+import html
+
+from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
+from ...utils import is_bs4_available, logging, requires_backends
+
+
+if is_bs4_available():
+ import bs4
+ from bs4 import BeautifulSoup
+
+
+logger = logging.get_logger(__name__)
+
+
+class MarkupLMFeatureExtractor(FeatureExtractionMixin):
+ r"""
+ Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from HTML
+ strings.
+
+ This feature extractor inherits from [`~feature_extraction_utils.PreTrainedFeatureExtractor`] which contains most
+ of the main methods. Users should refer to this superclass for more information regarding those methods.
+
+ """
+
+ def __init__(self, **kwargs):
+ requires_backends(self, ["bs4"])
+ super().__init__(**kwargs)
+
+ def xpath_soup(self, element):
+ xpath_tags = []
+ xpath_subscripts = []
+ child = element if element.name else element.parent
+ for parent in child.parents: # type: bs4.element.Tag
+ siblings = parent.find_all(child.name, recursive=False)
+ xpath_tags.append(child.name)
+ xpath_subscripts.append(
+ 0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
+ )
+ child = parent
+ xpath_tags.reverse()
+ xpath_subscripts.reverse()
+ return xpath_tags, xpath_subscripts
+
+ def get_three_from_single(self, html_string):
+ html_code = BeautifulSoup(html_string, "html.parser")
+
+ all_doc_strings = []
+ string2xtag_seq = []
+ string2xsubs_seq = []
+
+ for element in html_code.descendants:
+ if isinstance(element, bs4.element.NavigableString):
+ if type(element.parent) != bs4.element.Tag:
+ continue
+
+ text_in_this_tag = html.unescape(element).strip()
+ if not text_in_this_tag:
+ continue
+
+ all_doc_strings.append(text_in_this_tag)
+
+ xpath_tags, xpath_subscripts = self.xpath_soup(element)
+ string2xtag_seq.append(xpath_tags)
+ string2xsubs_seq.append(xpath_subscripts)
+
+ if len(all_doc_strings) != len(string2xtag_seq):
+ raise ValueError("Number of doc strings and xtags does not correspond")
+ if len(all_doc_strings) != len(string2xsubs_seq):
+ raise ValueError("Number of doc strings and xsubs does not correspond")
+
+ return all_doc_strings, string2xtag_seq, string2xsubs_seq
+
+ def construct_xpath(self, xpath_tags, xpath_subscripts):
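+ # Illustrative example (HTML assumed): for "<html><body><div>hello</div></body></html>",
+ # `xpath_soup` on the text node yields xpath_tags ['html', 'body', 'div'] and
+ # xpath_subscripts [0, 0, 0] (0 marks an only child), which this method
+ # renders as "/html/body/div".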
+ xpath = ""
+ for tagname, subs in zip(xpath_tags, xpath_subscripts):
+ xpath += f"/{tagname}"
+ if subs != 0:
+ xpath += f"[{subs}]"
+ return xpath
+
+ def __call__(self, html_strings) -> BatchFeature:
+ """
+ Main method to prepare for the model one or several HTML strings.
+
+ Args:
+ html_strings (`str`, `List[str]`):
+ The HTML string or batch of HTML strings from which to extract nodes and corresponding xpaths.
+
+ Returns:
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+ - **nodes** -- Nodes.
+ - **xpaths** -- Corresponding xpaths.
+
+ Examples:
+
+ ```python
+ >>> from transformers import MarkupLMFeatureExtractor
+
+ >>> page_name_1 = "page1.html"
+ >>> page_name_2 = "page2.html"
+ >>> page_name_3 = "page3.html"
+
+ >>> with open(page_name_1) as f:
+ ... single_html_string = f.read()
+
+ >>> feature_extractor = MarkupLMFeatureExtractor()
+
+ >>> # single example
+ >>> encoding = feature_extractor(single_html_string)
+ >>> print(encoding.keys())
+ >>> # dict_keys(['nodes', 'xpaths'])
+
+ >>> # batched example
+
+ >>> multi_html_strings = []
+
+ >>> with open(page_name_2) as f:
+ ... multi_html_strings.append(f.read())
+ >>> with open(page_name_3) as f:
+ ... multi_html_strings.append(f.read())
+
+ >>> encoding = feature_extractor(multi_html_strings)
+ >>> print(encoding.keys())
+ >>> # dict_keys(['nodes', 'xpaths'])
+ ```"""
+
+ # Input type checking for clearer error
+ valid_strings = False
+
+ # Check that strings has a valid type
+ if isinstance(html_strings, str):
+ valid_strings = True
+ elif isinstance(html_strings, (list, tuple)):
+ if len(html_strings) == 0 or isinstance(html_strings[0], str):
+ valid_strings = True
+
+ if not valid_strings:
+ raise ValueError(
+ "HTML strings must of type `str`, `List[str]` (batch of examples), "
+ f"but is of type {type(html_strings)}."
+ )
+
+ is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
+
+ if not is_batched:
+ html_strings = [html_strings]
+
+ # Get nodes + xpaths
+ nodes = []
+ xpaths = []
+ for html_string in html_strings:
+ all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
+ nodes.append(all_doc_strings)
+ xpath_strings = []
+ for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
+ xpath_string = self.construct_xpath(tag_list, sub_list)
+ xpath_strings.append(xpath_string)
+ xpaths.append(xpath_strings)
+
+ # return as Dict
+ data = {"nodes": nodes, "xpaths": xpaths}
+ encoded_inputs = BatchFeature(data=data, tensor_type=None)
+
+ return encoded_inputs
diff --git a/venv/lib/python3.10/site-packages/transformers/models/markuplm/modeling_markuplm.py b/venv/lib/python3.10/site-packages/transformers/models/markuplm/modeling_markuplm.py
new file mode 100644
index 0000000000000000000000000000000000000000..2058ce2795167689468496e43394ac26ee2bdeab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/markuplm/modeling_markuplm.py
@@ -0,0 +1,1316 @@
+# coding=utf-8
+# Copyright 2022 Microsoft Research Asia and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch MarkupLM model."""
+
+import math
+import os
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...file_utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ replace_return_docstrings,
+)
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ MaskedLMOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import (
+ PreTrainedModel,
+ apply_chunking_to_forward,
+ find_pruneable_heads_and_indices,
+ prune_linear_layer,
+)
+from ...utils import logging
+from .configuration_markuplm import MarkupLMConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "microsoft/markuplm-base"
+_CONFIG_FOR_DOC = "MarkupLMConfig"
+
+
+from ..deprecated._archive_maps import MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class XPathEmbeddings(nn.Module):
+ """Construct the embeddings from xpath tags and subscripts.
+
+ We drop tree-id in this version, as its info can be covered by xpath.
+ """
+
+ def __init__(self, config):
+ super(XPathEmbeddings, self).__init__()
+ self.max_depth = config.max_depth
+
+ self.xpath_unitseq2_embeddings = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, config.hidden_size)
+
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ self.activation = nn.ReLU()
+ self.xpath_unitseq2_inner = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, 4 * config.hidden_size)
+ self.inner2emb = nn.Linear(4 * config.hidden_size, config.hidden_size)
+
+ self.xpath_tag_sub_embeddings = nn.ModuleList(
+ [
+ nn.Embedding(config.max_xpath_tag_unit_embeddings, config.xpath_unit_hidden_size)
+ for _ in range(self.max_depth)
+ ]
+ )
+
+ self.xpath_subs_sub_embeddings = nn.ModuleList(
+ [
+ nn.Embedding(config.max_xpath_subs_unit_embeddings, config.xpath_unit_hidden_size)
+ for _ in range(self.max_depth)
+ ]
+ )
+
+ def forward(self, xpath_tags_seq=None, xpath_subs_seq=None):
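+ # Shape sketch: xpath_tags_seq and xpath_subs_seq are
+ # (batch, seq_len, max_depth); each depth slot i is embedded to
+ # (batch, seq_len, xpath_unit_hidden_size), the per-depth pieces are
+ # concatenated to (batch, seq_len, max_depth * xpath_unit_hidden_size),
+ # and the two-layer projection maps that to (batch, seq_len, hidden_size).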
+ xpath_tags_embeddings = []
+ xpath_subs_embeddings = []
+
+ for i in range(self.max_depth):
+ xpath_tags_embeddings.append(self.xpath_tag_sub_embeddings[i](xpath_tags_seq[:, :, i]))
+ xpath_subs_embeddings.append(self.xpath_subs_sub_embeddings[i](xpath_subs_seq[:, :, i]))
+
+ xpath_tags_embeddings = torch.cat(xpath_tags_embeddings, dim=-1)
+ xpath_subs_embeddings = torch.cat(xpath_subs_embeddings, dim=-1)
+
+ xpath_embeddings = xpath_tags_embeddings + xpath_subs_embeddings
+
+ xpath_embeddings = self.inner2emb(self.dropout(self.activation(self.xpath_unitseq2_inner(xpath_embeddings))))
+
+ return xpath_embeddings
+
+
+# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: torch.Tensor
+ padding_idx: int
+ past_key_values_length: int
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
+
+
+class MarkupLMEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super(MarkupLMEmbeddings, self).__init__()
+ self.config = config
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+
+ self.max_depth = config.max_depth
+
+ self.xpath_embeddings = XPathEmbeddings(config)
+
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ self.padding_idx = config.pad_token_id
+ self.position_embeddings = nn.Embedding(
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
+ )
+
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings.create_position_ids_from_inputs_embeds
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
+ def forward(
+ self,
+ input_ids=None,
+ xpath_tags_seq=None,
+ xpath_subs_seq=None,
+ token_type_ids=None,
+ position_ids=None,
+ inputs_embeds=None,
+ past_key_values_length=0,
+ ):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ # prepare xpath seq
+ if xpath_tags_seq is None:
+ xpath_tags_seq = self.config.tag_pad_id * torch.ones(
+ tuple(list(input_shape) + [self.max_depth]), dtype=torch.long, device=device
+ )
+ if xpath_subs_seq is None:
+ xpath_subs_seq = self.config.subs_pad_id * torch.ones(
+ tuple(list(input_shape) + [self.max_depth]), dtype=torch.long, device=device
+ )
+
+ words_embeddings = inputs_embeds
+ position_embeddings = self.position_embeddings(position_ids)
+
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ xpath_embeddings = self.xpath_embeddings(xpath_tags_seq, xpath_subs_seq)
+ embeddings = words_embeddings + position_embeddings + token_type_embeddings + xpath_embeddings
+
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->MarkupLM
+class MarkupLMSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate
+class MarkupLMIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->MarkupLM
+class MarkupLMOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler
+class MarkupLMPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->MarkupLM
+class MarkupLMPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MarkupLM
+class MarkupLMLMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = MarkupLMPredictionHeadTransform(config)
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->MarkupLM
+class MarkupLMOnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = MarkupLMLMPredictionHead(config)
+
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->MarkupLM
+class MarkupLMSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
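+ # Reshape (batch, seq_len, all_head_size) into
+ # (batch, num_attention_heads, seq_len, attention_head_size) so that
+ # attention can be computed per head.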
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the MarkupLMModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->MarkupLM
+class MarkupLMAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = MarkupLMSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = MarkupLMSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
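+ # Sketch of the effect (head indices assumed for illustration): on a
+ # 12-head layer, prune_heads([0, 1]) leaves 10 heads; query/key/value lose
+ # the matching output rows and `self.output.dense` loses the matching
+ # input columns (dim=1).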
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->MarkupLM
+class MarkupLMLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = MarkupLMAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = MarkupLMAttention(config, position_embedding_type="absolute")
+ self.intermediate = MarkupLMIntermediate(config)
+ self.output = MarkupLMOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->MarkupLM
+class MarkupLMEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([MarkupLMLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class MarkupLMPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MarkupLMConfig
+ base_model_prefix = "markuplm"
+
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights with Bert->MarkupLM
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
+ return super(MarkupLMPreTrainedModel, cls).from_pretrained(
+ pretrained_model_name_or_path, *model_args, **kwargs
+ )
+
+
+MARKUPLM_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`MarkupLMConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MARKUPLM_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ xpath_tags_seq (`torch.LongTensor` of shape `({0}, config.max_depth)`, *optional*):
+ Tag IDs for each token in the input sequence, padded up to config.max_depth.
+
+ xpath_subs_seq (`torch.LongTensor` of shape `({0}, config.max_depth)`, *optional*):
+ Subscript IDs for each token in the input sequence, padded up to config.max_depth.
+
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: `1` for
+ tokens that are NOT MASKED, `0` for MASKED tokens.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`: `0` corresponds to a *sentence A* token, `1` corresponds to a *sentence B* token
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: `1`
+ indicates the head is **not masked**, `0` indicates the head is **masked**.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ If set to `True`, the attentions tensors of all attention layers are returned. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ If set to `True`, the hidden states of all layers are returned. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ If set to `True`, the model will return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare MarkupLM Model transformer outputting raw hidden-states without any specific head on top.",
+ MARKUPLM_START_DOCSTRING,
+)
+class MarkupLMModel(MarkupLMPreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->MarkupLM
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = MarkupLMEmbeddings(config)
+ self.encoder = MarkupLMEncoder(config)
+
+ self.pooler = MarkupLMPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ xpath_tags_seq: Optional[torch.LongTensor] = None,
+ xpath_subs_seq: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, MarkupLMModel
+
+ >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
+ >>> model = MarkupLMModel.from_pretrained("microsoft/markuplm-base")
+
+ >>> html_string = " Page Title "
+
+ >>> encoding = processor(html_string, return_tensors="pt")
+
+ >>> outputs = model(**encoding)
+ >>> last_hidden_states = outputs.last_hidden_state
+ >>> list(last_hidden_states.shape)
+ [1, 4, 768]
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
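+ # Build an additive attention bias: kept positions contribute 0.0 and masked positions a large
+ # negative value, so softmax in self-attention gives masked tokens (near-)zero weight.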
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
+
+ if head_mask is not None:
+ if head_mask.dim() == 1:
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
+ head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
+ elif head_mask.dim() == 2:
+ head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
+ head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
+ else:
+ head_mask = [None] * self.config.num_hidden_layers
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ xpath_tags_seq=xpath_tags_seq,
+ xpath_subs_seq=xpath_subs_seq,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ extended_attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs
+ ):
+ input_shape = input_ids.shape
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_shape)
+
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
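+ # past_key_values[0][0] holds the cached keys of the first layer with shape
+ # (batch_size, num_heads, past_sequence_length, head_dim), so dim 2 is the length already processed.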
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel._reorder_cache
+ def _reorder_cache(self, past_key_values, beam_idx):
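+ # Reorder every cached key/value tensor along the batch dimension so the cache follows
+ # the beam indices selected at this step of beam search.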
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ """
+ MarkupLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ MARKUPLM_START_DOCSTRING,
+)
+class MarkupLMForQuestionAnswering(MarkupLMPreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with bert->markuplm, Bert->MarkupLM
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.markuplm = MarkupLMModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ xpath_tags_seq: Optional[torch.Tensor] = None,
+ xpath_subs_seq: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, MarkupLMForQuestionAnswering
+ >>> import torch
+
+ >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base-finetuned-websrc")
+ >>> model = MarkupLMForQuestionAnswering.from_pretrained("microsoft/markuplm-base-finetuned-websrc")
+
+ >>> html_string = " My name is Niels "
+ >>> question = "What's his name?"
+
+ >>> encoding = processor(html_string, questions=question, return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**encoding)
+
+ >>> answer_start_index = outputs.start_logits.argmax()
+ >>> answer_end_index = outputs.end_logits.argmax()
+
+ >>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1]
+ >>> processor.decode(predict_answer_tokens).strip()
+ 'Niels'
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.markuplm(
+ input_ids,
+ xpath_tags_seq=xpath_tags_seq,
+ xpath_subs_seq=xpath_subs_seq,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, gathering the labels can add an extra dimension; squeeze it away
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions.clamp_(0, ignored_index)
+ end_positions.clamp_(0, ignored_index)
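+ # Targets clamped to ignored_index (== sequence length) land on the ignore_index of the
+ # loss below and therefore contribute nothing to it.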
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings("""MarkupLM Model with a `token_classification` head on top.""", MARKUPLM_START_DOCSTRING)
+class MarkupLMForTokenClassification(MarkupLMPreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with bert->markuplm, Bert->MarkupLM
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.markuplm = MarkupLMModel(config, add_pooling_layer=False)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ xpath_tags_seq: Optional[torch.Tensor] = None,
+ xpath_subs_seq: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, AutoModelForTokenClassification
+ >>> import torch
+
+ >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
+ >>> processor.parse_html = False
+ >>> model = AutoModelForTokenClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
+
+ >>> nodes = ["hello", "world"]
+ >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
+ >>> node_labels = [1, 2]
+ >>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**encoding)
+
+ >>> loss = outputs.loss
+ >>> logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.markuplm(
+ input_ids,
+ xpath_tags_seq=xpath_tags_seq,
+ xpath_subs_seq=xpath_subs_seq,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.classifier(sequence_output) # (batch_size, seq_length, node_type_size)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(
+ prediction_scores.view(-1, self.config.num_labels),
+ labels.view(-1),
+ )
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MarkupLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ MARKUPLM_START_DOCSTRING,
+)
+class MarkupLMForSequenceClassification(MarkupLMPreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with bert->markuplm, Bert->MarkupLM
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.markuplm = MarkupLMModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ xpath_tags_seq: Optional[torch.Tensor] = None,
+ xpath_subs_seq: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, AutoModelForSequenceClassification
+ >>> import torch
+
+ >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
+ >>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
+
+ >>> html_string = "<html> <head> <title>Page Title</title> </head> </html>"
+ >>> encoding = processor(html_string, return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**encoding)
+
+ >>> loss = outputs.loss
+ >>> logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.markuplm(
+ input_ids,
+ xpath_tags_seq=xpath_tags_seq,
+ xpath_subs_seq=xpath_subs_seq,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
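+ # With the problem type resolved, pick the matching loss: MSE for regression, cross-entropy
+ # for single-label classification, BCE-with-logits for multi-label classification.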
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/markuplm/processing_markuplm.py b/venv/lib/python3.10/site-packages/transformers/models/markuplm/processing_markuplm.py
new file mode 100644
index 0000000000000000000000000000000000000000..81aaca9e5cce4a691d969462028c537f4673b1df
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/markuplm/processing_markuplm.py
@@ -0,0 +1,146 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for MarkupLM.
+"""
+from typing import Optional, Union
+
+from ...file_utils import TensorType
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TruncationStrategy
+
+
+class MarkupLMProcessor(ProcessorMixin):
+ r"""
+ Constructs a MarkupLM processor which combines a MarkupLM feature extractor and a MarkupLM tokenizer into a single
+ processor.
+
+ [`MarkupLMProcessor`] offers all the functionalities you need to prepare data for the model.
+
+ It first uses [`MarkupLMFeatureExtractor`] to extract nodes and corresponding xpaths from one or more HTML strings.
+ Next, these are provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which turns them into token-level
+ `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`.
+
+ Args:
+ feature_extractor (`MarkupLMFeatureExtractor`):
+ An instance of [`MarkupLMFeatureExtractor`]. The feature extractor is a required input.
+ tokenizer (`MarkupLMTokenizer` or `MarkupLMTokenizerFast`):
+ An instance of [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. The tokenizer is a required input.
+ parse_html (`bool`, *optional*, defaults to `True`):
+ Whether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths.
+ """
+
+ feature_extractor_class = "MarkupLMFeatureExtractor"
+ tokenizer_class = ("MarkupLMTokenizer", "MarkupLMTokenizerFast")
+ parse_html = True
+
+ def __call__(
+ self,
+ html_strings=None,
+ nodes=None,
+ xpaths=None,
+ node_labels=None,
+ questions=None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ This method first forwards the `html_strings` argument to [`~MarkupLMFeatureExtractor.__call__`]. Next, it
+ passes the `nodes` and `xpaths` along with the additional arguments to [`~MarkupLMTokenizer.__call__`] and
+ returns the output.
+
+ Optionally, one can also provide a `questions` argument, which is passed along as the first sequence.
+
+ Please refer to the docstring of the above two methods for more information.
+ """
+ # first, create nodes and xpaths
+ if self.parse_html:
+ if html_strings is None:
+ raise ValueError("Make sure to pass HTML strings in case `parse_html` is set to `True`")
+
+ if nodes is not None or xpaths is not None or node_labels is not None:
+ raise ValueError(
+ "Please don't pass nodes, xpaths nor node labels in case `parse_html` is set to `True`"
+ )
+
+ features = self.feature_extractor(html_strings)
+ nodes = features["nodes"]
+ xpaths = features["xpaths"]
+ else:
+ if html_strings is not None:
+ raise ValueError("You have passed HTML strings but `parse_html` is set to `False`.")
+ if nodes is None or xpaths is None:
+ raise ValueError("Make sure to pass nodes and xpaths in case `parse_html` is set to `False`")
+
+ # second, apply the tokenizer
+ if questions is not None and self.parse_html:
+ if isinstance(questions, str):
+ questions = [questions] # add batch dimension (as the feature extractor always adds a batch dimension)
+
+ encoded_inputs = self.tokenizer(
+ text=questions if questions is not None else nodes,
+ text_pair=nodes if questions is not None else None,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+
+ return encoded_inputs
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to MarkupLMTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to MarkupLMTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
+ docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ return tokenizer_input_names
diff --git a/venv/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm.py b/venv/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm.py
new file mode 100644
index 0000000000000000000000000000000000000000..c77865abc934c99d41541b4644eb84b1b62406a4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm.py
@@ -0,0 +1,1445 @@
+# coding=utf-8
+# Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization class for MarkupLM."""
+
+import json
+import os
+from functools import lru_cache
+from typing import Dict, List, Optional, Tuple, Union
+
+import regex as re
+
+from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...tokenization_utils_base import (
+ ENCODE_KWARGS_DOCSTRING,
+ BatchEncoding,
+ EncodedInput,
+ PreTokenizedInput,
+ TextInput,
+ TextInputPair,
+ TruncationStrategy,
+)
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
+
+
+MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to encode the sequences with the special tokens relative to their model.
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
+ Activates and controls truncation. Accepts the following values:
+
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+ to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
+ sequences (or a batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ max_length (`int`, *optional*):
+ Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
+ `None`, this will use the predefined model maximum length if a maximum length is required by one of the
+ truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
+ truncation/padding to a maximum length will be deactivated.
+ stride (`int`, *optional*, defaults to 0):
+ If set to a number along with `max_length`, the overflowing tokens returned when
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
+ argument defines the number of overlapping tokens.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+"""
+
+
+@lru_cache()
+def bytes_to_unicode():
+ """
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
+ characters that the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large #
+ of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset
+ you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe
+ vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
+ strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+class MarkupLMTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE). [`MarkupLMTokenizer`] can be used to
+ turn HTML strings into token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and
+ `xpath_subs_seq`. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods.
+ Users should refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
+ other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ tags_dict,
+ errors="replace",
+ bos_token="<s>",
+ eos_token="</s>",
+ sep_token="</s>",
+ cls_token="<s>",
+ unk_token="<unk>",
+ pad_token="<pad>",
+ mask_token="<mask>",
+ add_prefix_space=False,
+ max_depth=50,
+ max_width=1000,
+ pad_width=1001,
+ pad_token_label=-100,
+ only_label_first_subword=True,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+
+ # Mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+
+ self.tags_dict = tags_dict
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+ # additional properties
+ self.max_depth = max_depth
+ self.max_width = max_width
+ self.pad_width = pad_width
+ self.unk_tag_id = len(self.tags_dict)
+ self.pad_tag_id = self.unk_tag_id + 1
+ self.pad_xpath_tags_seq = [self.pad_tag_id] * self.max_depth
+ self.pad_xpath_subs_seq = [self.pad_width] * self.max_depth
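+ # The unk/pad tag IDs sit just past the real tag vocabulary; a fully padded xpath is the
+ # pad tag ID (or pad_width for subscripts) repeated max_depth times.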
+
+ super().__init__(
+ vocab_file=vocab_file,
+ merges_file=merges_file,
+ tags_dict=tags_dict,
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ max_depth=max_depth,
+ max_width=max_width,
+ pad_width=pad_width,
+ pad_token_label=pad_token_label,
+ only_label_first_subword=only_label_first_subword,
+ **kwargs,
+ )
+
+ self.pad_token_label = pad_token_label
+ self.only_label_first_subword = only_label_first_subword
+
+ def get_xpath_seq(self, xpath):
+ """
+ Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of
+ tag IDs and corresponding subscripts, taking into account max depth.
+ """
+ xpath_tags_list = []
+ xpath_subs_list = []
+
+ xpath_units = xpath.split("/")
+ for unit in xpath_units:
+ if not unit.strip():
+ continue
+ name_subs = unit.strip().split("[")
+ tag_name = name_subs[0]
+ sub = 0 if len(name_subs) == 1 else int(name_subs[1][:-1])
+ xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id))
+ xpath_subs_list.append(min(self.max_width, sub))
+
+ xpath_tags_list = xpath_tags_list[: self.max_depth]
+ xpath_subs_list = xpath_subs_list[: self.max_depth]
+ xpath_tags_list += [self.pad_tag_id] * (self.max_depth - len(xpath_tags_list))
+ xpath_subs_list += [self.pad_width] * (self.max_depth - len(xpath_subs_list))
+
+ return xpath_tags_list, xpath_subs_list
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ def get_vocab(self):
+ vocab = self.encoder.copy()
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def bpe(self, token):
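+ # Greedy BPE: repeatedly merge the adjacent symbol pair with the best (lowest) rank in
+ # self.bpe_ranks until no known pair remains, then cache the result for this token.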
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+ return bpe_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ logger.warning(
+ "MarkupLM now does not support generative tasks, decoding is experimental and subject to change."
+ )
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ # save vocab_file
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ # save merge_file
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
+ text = " " + text
+ return (text, kwargs)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A RoBERTa sequence has the following format:
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def build_xpath_tags_with_special_tokens(
+ self, xpath_tags_0: List[int], xpath_tags_1: Optional[List[int]] = None
+ ) -> List[int]:
+ pad = [self.pad_xpath_tags_seq]
+ if xpath_tags_1 is None or len(xpath_tags_1) == 0:
+ return pad + xpath_tags_0 + pad
+ return pad + xpath_tags_0 + pad + xpath_tags_1 + pad
+
+ def build_xpath_subs_with_special_tokens(
+ self, xpath_subs_0: List[int], xpath_subs_1: Optional[List[int]] = None
+ ) -> List[int]:
+ pad = [self.pad_xpath_subs_seq]
+ if xpath_subs_1 is None or len(xpath_subs_1) == 0:
+ return pad + xpath_subs_0 + pad
+ return pad + xpath_subs_0 + pad + xpath_subs_1 + pad
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0]
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
+ xpaths: Union[List[List[int]], List[List[List[int]]]] = None,
+ node_labels: Optional[Union[List[int], List[List[int]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
+ sequences with node-level xpaths and optional labels.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
+ (nodes of a single example or questions of a batch of examples) or a list of list of strings (batch of
+ nodes).
+ text_pair (`List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
+ (pretokenized string).
+ xpaths (`List[List[int]]`, `List[List[List[int]]]`):
+ Node-level xpaths.
+ node_labels (`List[int]`, `List[List[int]]`, *optional*):
+ Node-level integer labels (for token classification tasks).
+ """
+
+ # Input type checking for clearer error
+ def _is_valid_text_input(t):
+ if isinstance(t, str):
+ # Strings are fine
+ return True
+ elif isinstance(t, (list, tuple)):
+ # List are fine as long as they are...
+ if len(t) == 0:
+ # ... empty
+ return True
+ elif isinstance(t[0], str):
+ # ... list of strings
+ return True
+ elif isinstance(t[0], (list, tuple)):
+ # ... list with an empty list or with a list of strings
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
+ else:
+ return False
+ else:
+ return False
+
+ if text_pair is not None:
+ # in case text + text_pair are provided, text = questions, text_pair = nodes
+ if not _is_valid_text_input(text):
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
+ if not isinstance(text_pair, (list, tuple)):
+ raise ValueError(
+ "Nodes must be of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+ else:
+ # in case only text is provided => must be nodes
+ if not isinstance(text, (list, tuple)):
+ raise ValueError(
+ "Nodes must be of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+
+ if text_pair is not None:
+ is_batched = isinstance(text, (list, tuple))
+ else:
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
+
+ nodes = text if text_pair is None else text_pair
+ assert xpaths is not None, "You must provide corresponding xpaths"
+ if is_batched:
+ assert len(nodes) == len(xpaths), "You must provide nodes and xpaths for an equal amount of examples"
+ for nodes_example, xpaths_example in zip(nodes, xpaths):
+ assert len(nodes_example) == len(xpaths_example), "You must provide as many nodes as there are xpaths"
+ else:
+ assert len(nodes) == len(xpaths), "You must provide as many nodes as there are xpaths"
+
+ if is_batched:
+ if text_pair is not None and len(text) != len(text_pair):
+ raise ValueError(
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
+ f" {len(text_pair)}."
+ )
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
+ is_pair = bool(text_pair is not None)
+ return self.batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ text=text,
+ text_pair=text_pair,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ ],
+ is_pair: bool = None,
+ xpaths: Optional[List[List[List[int]]]] = None,
+ node_labels: Optional[Union[List[int], List[List[int]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ ],
+ is_pair: bool = None,
+ xpaths: Optional[List[List[List[int]]]] = None,
+ node_labels: Optional[List[List[int]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast."
+ )
+
+ batch_outputs = self._batch_prepare_for_model(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=return_tensors,
+ verbose=verbose,
+ )
+
+ return BatchEncoding(batch_outputs)
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def _batch_prepare_for_model(
+ self,
+ batch_text_or_text_pairs,
+ is_pair: bool = None,
+ xpaths: Optional[List[List[int]]] = None,
+ node_labels: Optional[List[List[int]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ ) -> BatchEncoding:
+ """
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
+ manages a moving window (with user defined stride) for overflowing tokens.
+
+ Args:
+ batch_ids_pairs: list of tokenized input ids or input ids pairs
+ """
+
+ batch_outputs = {}
+ for idx, example in enumerate(zip(batch_text_or_text_pairs, xpaths)):
+ batch_text_or_text_pair, xpaths_example = example
+ outputs = self.prepare_for_model(
+ batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
+ batch_text_or_text_pair[1] if is_pair else None,
+ xpaths_example,
+ node_labels=node_labels[idx] if node_labels is not None else None,
+ add_special_tokens=add_special_tokens,
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=None, # we pad in batch afterward
+ return_attention_mask=False, # we pad in batch afterward
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=None, # We convert the whole batch to tensors at the end
+ prepend_batch_axis=False,
+ verbose=verbose,
+ )
+
+ for key, value in outputs.items():
+ if key not in batch_outputs:
+ batch_outputs[key] = []
+ batch_outputs[key].append(value)
+
+ batch_outputs = self.pad(
+ batch_outputs,
+ padding=padding_strategy.value,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+
+ return batch_outputs
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
+ def encode(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ xpaths: Optional[List[List[int]]] = None,
+ node_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> List[int]:
+ encoded_inputs = self.encode_plus(
+ text=text,
+ text_pair=text_pair,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return encoded_inputs["input_ids"]
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ xpaths: Optional[List[List[int]]] = None,
+ node_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Tokenize and prepare for the model a sequence or a pair of sequences.
+
+ .. warning:: This method is deprecated, `__call__` should be used instead.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
+ text_pair (`List[str]`, *optional*):
+ Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a
+ list of list of strings (nodes of a batch of examples).
+ """
+
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._encode_plus(
+ text=text,
+ xpaths=xpaths,
+ text_pair=text_pair,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ xpaths: Optional[List[List[int]]] = None,
+ node_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast. "
+ "More information on available tokenizers at "
+ "https://github.com/huggingface/transformers/pull/2674"
+ )
+
+ return self.prepare_for_model(
+ text=text,
+ text_pair=text_pair,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding_strategy.value,
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ prepend_batch_axis=True,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def prepare_for_model(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ xpaths: Optional[List[List[int]]] = None,
+ node_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ prepend_batch_axis: bool = False,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
+ truncates sequences if overflowing while taking into account the special tokens and manages a moving window
+ (with user-defined stride) for overflowing tokens. Note that for a *text_pair* different from `None` and
+ *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
+ combination of arguments will raise an error.
+
+ Node-level `xpaths` are turned into token-level `xpath_tags_seq` and `xpath_subs_seq`. If provided, node-level
+ `node_labels` are turned into token-level `labels`. The node label is used for the first token of the node,
+ while remaining tokens are labeled with -100, such that they will be ignored by the loss function.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
+ text_pair (`List[str]`, *optional*):
+ Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a
+ list of list of strings (nodes of a batch of examples).
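+
+ For illustration (hypothetical values): with `node_labels=[1]` and a node whose word is split into three
+ subword tokens, the token-level labels become `[1, -100, -100]` when `only_label_first_subword=True`
+ (assuming the default `pad_token_label=-100`), so the loss only scores the first subword of each node.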
+ """
+
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ tokens = []
+ pair_tokens = []
+ xpath_tags_seq = []
+ xpath_subs_seq = []
+ pair_xpath_tags_seq = []
+ pair_xpath_subs_seq = []
+ labels = []
+
+ if text_pair is None:
+ if node_labels is None:
+ # CASE 1: web page classification (training + inference) + CASE 2: token classification (inference)
+ for word, xpath in zip(text, xpaths):
+ if len(word) < 1: # skip empty nodes
+ continue
+ word_tokens = self.tokenize(word)
+ tokens.extend(word_tokens)
+ xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath)
+ xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens))
+ xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens))
+ else:
+ # CASE 2: token classification (training)
+ for word, xpath, label in zip(text, xpaths, node_labels):
+ if len(word) < 1: # skip empty nodes
+ continue
+ word_tokens = self.tokenize(word)
+ tokens.extend(word_tokens)
+ xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath)
+ xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens))
+ xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens))
+ if self.only_label_first_subword:
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
+ labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
+ else:
+ labels.extend([label] * len(word_tokens))
+ else:
+ # CASE 3: web page question answering (inference)
+ # text = question
+ # text_pair = nodes
+ tokens = self.tokenize(text)
+ xpath_tags_seq = [self.pad_xpath_tags_seq for _ in range(len(tokens))]
+ xpath_subs_seq = [self.pad_xpath_subs_seq for _ in range(len(tokens))]
+
+ for word, xpath in zip(text_pair, xpaths):
+ if len(word) < 1: # skip empty nodes
+ continue
+ word_tokens = self.tokenize(word)
+ pair_tokens.extend(word_tokens)
+ xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath)
+ pair_xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens))
+ pair_xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens))
+
+ # Create ids + pair_ids
+ ids = self.convert_tokens_to_ids(tokens)
+ pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
+
+ if (
+ return_overflowing_tokens
+ and truncation_strategy == TruncationStrategy.LONGEST_FIRST
+ and pair_ids is not None
+ ):
+ raise ValueError(
+ "Not possible to return overflowing tokens for pair of sequences with the "
+ "`longest_first`. Please select another truncation strategy than `longest_first`, "
+ "for instance `only_second` or `only_first`."
+ )
+
+ # Compute the total size of the returned encodings
+ pair = bool(pair_ids is not None)
+ len_ids = len(ids)
+ len_pair_ids = len(pair_ids) if pair else 0
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
+
+ # Truncation: Handle max sequence length
+ overflowing_tokens = []
+ overflowing_xpath_tags_seq = []
+ overflowing_xpath_subs_seq = []
+ overflowing_labels = []
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
+ (
+ ids,
+ xpath_tags_seq,
+ xpath_subs_seq,
+ pair_ids,
+ pair_xpath_tags_seq,
+ pair_xpath_subs_seq,
+ labels,
+ overflowing_tokens,
+ overflowing_xpath_tags_seq,
+ overflowing_xpath_subs_seq,
+ overflowing_labels,
+ ) = self.truncate_sequences(
+ ids,
+ xpath_tags_seq=xpath_tags_seq,
+ xpath_subs_seq=xpath_subs_seq,
+ pair_ids=pair_ids,
+ pair_xpath_tags_seq=pair_xpath_tags_seq,
+ pair_xpath_subs_seq=pair_xpath_subs_seq,
+ labels=labels,
+ num_tokens_to_remove=total_len - max_length,
+ truncation_strategy=truncation_strategy,
+ stride=stride,
+ )
+
+ if return_token_type_ids and not add_special_tokens:
+ raise ValueError(
+ "Asking to return token_type_ids while setting add_special_tokens to False "
+ "results in an undefined behavior. Please set add_special_tokens to True or "
+ "set return_token_type_ids to None."
+ )
+
+ # Load from model defaults
+ if return_token_type_ids is None:
+ return_token_type_ids = "token_type_ids" in self.model_input_names
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ encoded_inputs = {}
+
+ if return_overflowing_tokens:
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
+ encoded_inputs["overflowing_xpath_tags_seq"] = overflowing_xpath_tags_seq
+ encoded_inputs["overflowing_xpath_subs_seq"] = overflowing_xpath_subs_seq
+ encoded_inputs["overflowing_labels"] = overflowing_labels
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
+
+ # Add special tokens
+ if add_special_tokens:
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
+ xpath_tags_ids = self.build_xpath_tags_with_special_tokens(xpath_tags_seq, pair_xpath_tags_seq)
+ xpath_subs_ids = self.build_xpath_subs_with_special_tokens(xpath_subs_seq, pair_xpath_subs_seq)
+ if labels:
+ labels = [self.pad_token_label] + labels + [self.pad_token_label]
+ else:
+ sequence = ids + pair_ids if pair else ids
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
+ xpath_tags_ids = xpath_tags_seq + pair_xpath_tags_seq if pair else xpath_tags_seq
+ xpath_subs_ids = xpath_subs_seq + pair_xpath_subs_seq if pair else xpath_subs_seq
+
+ # Build output dictionary
+ encoded_inputs["input_ids"] = sequence
+ encoded_inputs["xpath_tags_seq"] = xpath_tags_ids
+ encoded_inputs["xpath_subs_seq"] = xpath_subs_ids
+ if return_token_type_ids:
+ encoded_inputs["token_type_ids"] = token_type_ids
+ if return_special_tokens_mask:
+ if add_special_tokens:
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
+ else:
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
+
+ if labels:
+ encoded_inputs["labels"] = labels
+
+ # Check lengths
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
+
+ # Padding
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
+ encoded_inputs = self.pad(
+ encoded_inputs,
+ max_length=max_length,
+ padding=padding_strategy.value,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ if return_length:
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
+
+ batch_outputs = BatchEncoding(
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
+ )
+
+ return batch_outputs
+
+ def truncate_sequences(
+ self,
+ ids: List[int],
+ xpath_tags_seq: List[List[int]],
+ xpath_subs_seq: List[List[int]],
+ pair_ids: Optional[List[int]] = None,
+ pair_xpath_tags_seq: Optional[List[List[int]]] = None,
+ pair_xpath_subs_seq: Optional[List[List[int]]] = None,
+ labels: Optional[List[int]] = None,
+ num_tokens_to_remove: int = 0,
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
+ stride: int = 0,
+ ) -> Tuple:
+ """
+ Truncates a sequence pair in-place following the strategy.
+
+ Args:
+ ids (`List[int]`):
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
+ `convert_tokens_to_ids` methods.
+ xpath_tags_seq (`List[List[int]]`):
+ XPath tag IDs of the first sequence.
+ xpath_subs_seq (`List[List[int]]`):
+ XPath sub IDs of the first sequence.
+ pair_ids (`List[int]`, *optional*):
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
+ and `convert_tokens_to_ids` methods.
+ pair_xpath_tags_seq (`List[List[int]]`, *optional*):
+ XPath tag IDs of the second sequence.
+ pair_xpath_subs_seq (`List[List[int]]`, *optional*):
+ XPath sub IDs of the second sequence.
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
+ Number of tokens to remove using the truncation strategy.
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to
+ `'longest_first'`):
+ The strategy to follow for truncation. Can be:
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
+ batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
+ than the model maximum admissible input size).
+ stride (`int`, *optional*, defaults to 0):
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
+ sequence returned. The value of this argument defines the number of additional tokens.
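+ For example, with `stride=2` and `num_tokens_to_remove=3`, the overflowing window holds the 3 removed
+ tokens plus 2 extra context tokens from the end of the kept sequence (5 tokens in total).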
+ Returns:
+ `Tuple`: The truncated `ids` with their xpath tag and subscript sequences, the truncated `pair_ids` with
+ their xpath tag and subscript sequences, the truncated `labels`, and the lists of overflowing tokens,
+ xpath tag/subscript sequences and labels. Note: the *longest_first* strategy returns an empty list of
+ overflowing tokens if a pair of sequences (or a batch of pairs) is provided.
+ """
+ if num_tokens_to_remove <= 0:
+ return ids, xpath_tags_seq, xpath_subs_seq, pair_ids, pair_xpath_tags_seq, pair_xpath_subs_seq, [], [], []
+
+ if not isinstance(truncation_strategy, TruncationStrategy):
+ truncation_strategy = TruncationStrategy(truncation_strategy)
+
+ overflowing_tokens = []
+ overflowing_xpath_tags_seq = []
+ overflowing_xpath_subs_seq = []
+ overflowing_labels = []
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
+ truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
+ ):
+ if len(ids) > num_tokens_to_remove:
+ window_len = min(len(ids), stride + num_tokens_to_remove)
+ overflowing_tokens = ids[-window_len:]
+ overflowing_xpath_tags_seq = xpath_tags_seq[-window_len:]
+ overflowing_xpath_subs_seq = xpath_subs_seq[-window_len:]
+ ids = ids[:-num_tokens_to_remove]
+ xpath_tags_seq = xpath_tags_seq[:-num_tokens_to_remove]
+ xpath_subs_seq = xpath_subs_seq[:-num_tokens_to_remove]
+ labels = labels[:-num_tokens_to_remove]
+ else:
+ error_msg = (
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
+ f"but the first sequence has a length {len(ids)}. "
+ )
+ if truncation_strategy == TruncationStrategy.ONLY_FIRST:
+ error_msg = (
+ error_msg + "Please select another truncation strategy than "
+ f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
+ )
+ logger.error(error_msg)
+ elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
+ logger.warning(
+ "Be aware, overflowing tokens are not returned for the setting you have chosen,"
+ f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
+ "truncation strategy. So the returned list will always be empty even if some "
+ "tokens have been removed."
+ )
+ for _ in range(num_tokens_to_remove):
+ if pair_ids is None or len(ids) > len(pair_ids):
+ ids = ids[:-1]
+ xpath_tags_seq = xpath_tags_seq[:-1]
+ xpath_subs_seq = xpath_subs_seq[:-1]
+ labels = labels[:-1]
+ else:
+ pair_ids = pair_ids[:-1]
+ pair_xpath_tags_seq = pair_xpath_tags_seq[:-1]
+ pair_xpath_subs_seq = pair_xpath_subs_seq[:-1]
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
+ if len(pair_ids) > num_tokens_to_remove:
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
+ overflowing_tokens = pair_ids[-window_len:]
+ overflowing_xpath_tags_seq = pair_xpath_tags_seq[-window_len:]
+ overflowing_xpath_subs_seq = pair_xpath_subs_seq[-window_len:]
+ pair_ids = pair_ids[:-num_tokens_to_remove]
+ pair_xpath_tags_seq = pair_xpath_tags_seq[:-num_tokens_to_remove]
+ pair_xpath_subs_seq = pair_xpath_subs_seq[:-num_tokens_to_remove]
+ else:
+ logger.error(
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
+ f"but the second sequence has a length {len(pair_ids)}. "
+ f"Please select another truncation strategy than {truncation_strategy}, "
+ "for instance 'longest_first' or 'only_first'."
+ )
+
+ return (
+ ids,
+ xpath_tags_seq,
+ xpath_subs_seq,
+ pair_ids,
+ pair_xpath_tags_seq,
+ pair_xpath_subs_seq,
+ labels,
+ overflowing_tokens,
+ overflowing_xpath_tags_seq,
+ overflowing_xpath_subs_seq,
+ overflowing_labels,
+ )
+
+ def _pad(
+ self,
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+ max_length: Optional[int] = None,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ ) -> dict:
+ """
+ Args:
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+ encoded_inputs:
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+ max_length: maximum length of the returned list and optionally padding length (see below).
+ Will truncate by taking into account the special tokens.
+ padding_strategy: PaddingStrategy to use for padding.
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
+ The tokenizer padding sides are defined in self.padding_side:
+ - 'left': pads on the left of the sequences
+ - 'right': pads on the right of the sequences
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta).
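+ For example, with `max_length=10` and `pad_to_multiple_of=8`, sequences are padded up to length 16.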
+ return_attention_mask:
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+ """
+ # Load from model defaults
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = len(required_input)
+
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+ # Initialize attention mask if not present.
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
+
+ if needs_to_be_padded:
+ difference = max_length - len(required_input)
+ if self.padding_side == "right":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = (
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
+ )
+ if "xpath_tags_seq" in encoded_inputs:
+ encoded_inputs["xpath_tags_seq"] = (
+ encoded_inputs["xpath_tags_seq"] + [self.pad_xpath_tags_seq] * difference
+ )
+ if "xpath_subs_seq" in encoded_inputs:
+ encoded_inputs["xpath_subs_seq"] = (
+ encoded_inputs["xpath_subs_seq"] + [self.pad_xpath_subs_seq] * difference
+ )
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
+ elif self.padding_side == "left":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
+ "token_type_ids"
+ ]
+ if "xpath_tags_seq" in encoded_inputs:
+ encoded_inputs["xpath_tags_seq"] = [self.pad_xpath_tags_seq] * difference + encoded_inputs[
+ "xpath_tags_seq"
+ ]
+ if "xpath_subs_seq" in encoded_inputs:
+ encoded_inputs["xpath_subs_seq"] = [self.pad_xpath_subs_seq] * difference + encoded_inputs[
+ "xpath_subs_seq"
+ ]
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+ else:
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
+
+ return encoded_inputs
diff --git a/venv/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm_fast.py b/venv/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff0e4ffeb56e9f1b0721e86f2e82324b14a3f477
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/markuplm/tokenization_markuplm_fast.py
@@ -0,0 +1,918 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Fast tokenization class for MarkupLM. It overrides two methods of the base class, namely `_batch_encode_plus` and
+`_encode_plus`, in which the Rust tokenizer is used.
+"""
+
+import json
+from functools import lru_cache
+from typing import Dict, List, Optional, Tuple, Union
+
+from tokenizers import pre_tokenizers, processors
+
+from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
+from ...tokenization_utils_base import (
+ ENCODE_KWARGS_DOCSTRING,
+ AddedToken,
+ BatchEncoding,
+ EncodedInput,
+ PreTokenizedInput,
+ TextInput,
+ TextInputPair,
+ TruncationStrategy,
+)
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_markuplm import MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, MarkupLMTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
+
+
+@lru_cache()
+def bytes_to_unicode():
+ """
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
+ characters that the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large #
+ of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset
+ you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe
+ vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
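+
+ For example, the space byte is not kept as-is:
+
+ >>> bytes_to_unicode()[ord(" ")]
+ 'Ġ'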
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
+ strings).
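+
+ For example:
+
+ >>> sorted(get_pairs(("h", "e", "l", "l", "o")))
+ [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]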
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE).
+
+ [`MarkupLMTokenizerFast`] can be used to turn HTML strings into token-level `input_ids`, `attention_mask`,
+ `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`. This tokenizer inherits from [`PreTrainedTokenizerFast`] which
+ contains most of the main methods.
+
+ Users should refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
+ other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = MarkupLMTokenizer
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ tags_dict,
+ tokenizer_file=None,
+ errors="replace",
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ add_prefix_space=False,
+ max_depth=50,
+ max_width=1000,
+ pad_width=1001,
+ pad_token_label=-100,
+ only_label_first_subword=True,
+ trim_offsets=False,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ super().__init__(
+ vocab_file=vocab_file,
+ merges_file=merges_file,
+ tags_dict=tags_dict,
+ tokenizer_file=tokenizer_file,
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ trim_offsets=trim_offsets,
+ max_depth=max_depth,
+ max_width=max_width,
+ pad_width=pad_width,
+ pad_token_label=pad_token_label,
+ only_label_first_subword=only_label_first_subword,
+ **kwargs,
+ )
+ if trim_offsets:
+ # Not implemented yet, because we need to chain two post processors which is not possible yet
+ # We need to wait for https://github.com/huggingface/tokenizers/pull/1005
+ # With `trim_offsets=False` we don't need to do add `processors.ByteLevel(trim_offsets=False)`
+ # because it's not doing anything
+ raise NotImplementedError(
+ "`trim_offsets=True` is not implemented for MarkupLMTokenizerFast. Please set it to False."
+ )
+
+ self.tags_dict = tags_dict
+
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
+ pre_tok_state["add_prefix_space"] = add_prefix_space
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
+
+ self.add_prefix_space = add_prefix_space
+
+ tokenizer_component = "post_processor"
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
+ if tokenizer_component_instance:
+ state = json.loads(tokenizer_component_instance.__getstate__())
+
+ # The lists 'sep' and 'cls' must be cast as tuples for the object `post_processor_class`
+ if "sep" in state:
+ state["sep"] = tuple(state["sep"])
+ if "cls" in state:
+ state["cls"] = tuple(state["cls"])
+
+ changes_to_apply = False
+
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ state["add_prefix_space"] = add_prefix_space
+ changes_to_apply = True
+
+ if changes_to_apply:
+ component_class = getattr(processors, state.pop("type"))
+ new_value = component_class(**state)
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
+
+ # additional properties
+ self.max_depth = max_depth
+ self.max_width = max_width
+ self.pad_width = pad_width
+ self.unk_tag_id = len(self.tags_dict)
+ self.pad_tag_id = self.unk_tag_id + 1
+ self.pad_xpath_tags_seq = [self.pad_tag_id] * self.max_depth
+ self.pad_xpath_subs_seq = [self.pad_width] * self.max_depth
+ self.pad_token_label = pad_token_label
+ self.only_label_first_subword = only_label_first_subword
+
+ def get_xpath_seq(self, xpath):
+ """
+ Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of
+ tag IDs and corresponding subscripts, taking into account max depth.
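+
+ For illustration, with a hypothetical `tags_dict = {"html": 0, "body": 1, "div": 2, "li": 3, "span": 4}`
+ and `max_depth=50`, the xpath "/html/body/div/li[1]/div/span[2]" yields tag IDs `[0, 1, 2, 3, 2, 4]` and
+ subscripts `[0, 0, 0, 1, 0, 2]`, each padded to 50 entries with `self.pad_tag_id` and `self.pad_width`
+ respectively.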
+ """
+ xpath_tags_list = []
+ xpath_subs_list = []
+
+ xpath_units = xpath.split("/")
+ for unit in xpath_units:
+ if not unit.strip():
+ continue
+ name_subs = unit.strip().split("[")
+ tag_name = name_subs[0]
+ sub = 0 if len(name_subs) == 1 else int(name_subs[1][:-1])
+ xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id))
+ xpath_subs_list.append(min(self.max_width, sub))
+
+ xpath_tags_list = xpath_tags_list[: self.max_depth]
+ xpath_subs_list = xpath_subs_list[: self.max_depth]
+ xpath_tags_list += [self.pad_tag_id] * (self.max_depth - len(xpath_tags_list))
+ xpath_subs_list += [self.pad_width] * (self.max_depth - len(xpath_subs_list))
+
+ return xpath_tags_list, xpath_subs_list
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
+ xpaths: Union[List[List[int]], List[List[List[int]]]] = None,
+ node_labels: Optional[Union[List[int], List[List[int]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
+ sequences with nodes, xpaths and optional labels.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
+ words).
+ text_pair (`List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
+ (pretokenized string).
+ xpaths (`List[List[int]]`, `List[List[List[int]]]`):
+ Node-level xpaths, one xpath expression string per node.
+ node_labels (`List[int]`, `List[List[int]]`, *optional*):
+ Node-level integer labels (for token classification tasks).
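+
+ Example (a minimal sketch; assumes the `microsoft/markuplm-base` checkpoint is available):
+
+ ```python
+ >>> from transformers import MarkupLMTokenizerFast
+
+ >>> tokenizer = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base")
+ >>> nodes = ["hello", "world"]
+ >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
+ >>> encoding = tokenizer(nodes, xpaths=xpaths, padding="max_length", max_length=16, return_tensors="pt")
+ ```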
+ """
+
+ # Input type checking for clearer error
+ def _is_valid_text_input(t):
+ if isinstance(t, str):
+ # Strings are fine
+ return True
+ elif isinstance(t, (list, tuple)):
+ # List are fine as long as they are...
+ if len(t) == 0:
+ # ... empty
+ return True
+ elif isinstance(t[0], str):
+ # ... list of strings
+ return True
+ elif isinstance(t[0], (list, tuple)):
+ # ... list with an empty list or with a list of strings
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
+ else:
+ return False
+ else:
+ return False
+
+ if text_pair is not None:
+ # in case text + text_pair are provided, text = questions, text_pair = nodes
+ if not _is_valid_text_input(text):
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
+ if not isinstance(text_pair, (list, tuple)):
+ raise ValueError(
+ "Nodes must be of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+ else:
+ # in case only text is provided => must be nodes
+ if not isinstance(text, (list, tuple)):
+ raise ValueError(
+ "Nodes must be of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+
+ if text_pair is not None:
+ is_batched = isinstance(text, (list, tuple))
+ else:
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
+
+ nodes = text if text_pair is None else text_pair
+ assert xpaths is not None, "You must provide corresponding xpaths"
+ if is_batched:
+ assert len(nodes) == len(xpaths), "You must provide nodes and xpaths for an equal amount of examples"
+ for nodes_example, xpaths_example in zip(nodes, xpaths):
+ assert len(nodes_example) == len(xpaths_example), "You must provide as many nodes as there are xpaths"
+ else:
+ assert len(nodes) == len(xpaths), "You must provide as many nodes as there are xpaths"
+
+ if is_batched:
+ if text_pair is not None and len(text) != len(text_pair):
+ raise ValueError(
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
+ f" {len(text_pair)}."
+ )
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
+ is_pair = bool(text_pair is not None)
+ return self.batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ text=text,
+ text_pair=text_pair,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ ],
+ is_pair: bool = None,
+ xpaths: Optional[List[List[List[int]]]] = None,
+ node_labels: Optional[Union[List[int], List[List[int]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ xpaths=xpaths,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
+ batched_input = [(text, pair)] if pair else [text]
+ encodings = self._tokenizer.encode_batch(
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
+ )
+
+ return encodings[0].tokens
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ xpaths: Optional[List[List[int]]] = None,
+ node_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Tokenize and prepare for the model a sequence or a pair of sequences.
+
+ .. warning:: This method is deprecated, `__call__` should be used instead.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
+ text_pair (`List[str]`, *optional*):
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
+ list of list of strings (words of a batch of examples).
+ """
+
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._encode_plus(
+ text=text,
+ xpaths=xpaths,
+ text_pair=text_pair,
+ node_labels=node_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ ],
+ is_pair: bool = None,
+ xpaths: Optional[List[List[List[int]]]] = None,
+ node_labels: Optional[List[List[int]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ ) -> BatchEncoding:
+ if not isinstance(batch_text_or_text_pairs, list):
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
+
+ # Set the truncation and padding strategy and restore the initial configuration
+ self.set_truncation_and_padding(
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ )
+
+ if is_pair:
+ batch_text_or_text_pairs = [([text], text_pair) for text, text_pair in batch_text_or_text_pairs]
+
+ encodings = self._tokenizer.encode_batch(
+ batch_text_or_text_pairs,
+ add_special_tokens=add_special_tokens,
+ is_pretokenized=True, # we set this to True as MarkupLM always expects pretokenized inputs
+ )
+
+ # Convert encoding to dict
+ # `Tokens` is a tuple of (List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
+ # List[EncodingFast]) with nested dimensions corresponding to batch, overflows, sequence length
+ tokens_and_encodings = [
+ self._convert_encoding(
+ encoding=encoding,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=True
+ if node_labels is not None
+ else return_offsets_mapping, # we use offsets to create the labels
+ return_length=return_length,
+ verbose=verbose,
+ )
+ for encoding in encodings
+ ]
+
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
+ # (we say ~ because the number of overflow varies with the example in the batch)
+ #
+ # To match each overflowing sample with the original sample in the batch
+ # we add an overflow_to_sample_mapping array (see below)
+ sanitized_tokens = {}
+ for key in tokens_and_encodings[0][0].keys():
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
+ sanitized_tokens[key] = stack
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
+
+ # If returning overflowing tokens, we need to return a mapping
+ # from the batch idx to the original sample
+ if return_overflowing_tokens:
+ overflow_to_sample_mapping = []
+ for i, (toks, _) in enumerate(tokens_and_encodings):
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
+
+ for input_ids in sanitized_tokens["input_ids"]:
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
+
+ # create the token-level xpaths tags and subscripts
+ xpath_tags_seq = []
+ xpath_subs_seq = []
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
+ if return_overflowing_tokens:
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
+ else:
+ original_index = batch_index
+ xpath_tags_seq_example = []
+ xpath_subs_seq_example = []
+ for id, sequence_id, word_id in zip(
+ sanitized_tokens["input_ids"][batch_index],
+ sanitized_encodings[batch_index].sequence_ids,
+ sanitized_encodings[batch_index].word_ids,
+ ):
+ if word_id is not None:
+ if is_pair and sequence_id == 0:
+ xpath_tags_seq_example.append(self.pad_xpath_tags_seq)
+ xpath_subs_seq_example.append(self.pad_xpath_subs_seq)
+ else:
+ xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpaths[original_index][word_id])
+ xpath_tags_seq_example.extend([xpath_tags_list])
+ xpath_subs_seq_example.extend([xpath_subs_list])
+ else:
+ if id in [self.cls_token_id, self.sep_token_id, self.pad_token_id]:
+ xpath_tags_seq_example.append(self.pad_xpath_tags_seq)
+ xpath_subs_seq_example.append(self.pad_xpath_subs_seq)
+ else:
+ raise ValueError("Id not recognized")
+ xpath_tags_seq.append(xpath_tags_seq_example)
+ xpath_subs_seq.append(xpath_subs_seq_example)
+
+ sanitized_tokens["xpath_tags_seq"] = xpath_tags_seq
+ sanitized_tokens["xpath_subs_seq"] = xpath_subs_seq
+
+ # optionally, create the labels
+ if node_labels is not None:
+ labels = []
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
+ if return_overflowing_tokens:
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
+ else:
+ original_index = batch_index
+ labels_example = []
+ for id, offset, word_id in zip(
+ sanitized_tokens["input_ids"][batch_index],
+ sanitized_tokens["offset_mapping"][batch_index],
+ sanitized_encodings[batch_index].word_ids,
+ ):
+ if word_id is not None:
+ if self.only_label_first_subword:
+ if offset[0] == 0:
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
+ labels_example.append(node_labels[original_index][word_id])
+ else:
+ labels_example.append(self.pad_token_label)
+ else:
+ labels_example.append(node_labels[original_index][word_id])
+ else:
+ labels_example.append(self.pad_token_label)
+ labels.append(labels_example)
+
+ sanitized_tokens["labels"] = labels
+ # finally, remove offsets if the user didn't want them
+ if not return_offsets_mapping:
+ del sanitized_tokens["offset_mapping"]
+
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
+
+ def _encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ xpaths: Optional[List[List[int]]] = None,
+ node_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ # make it a batched input
+ # 2 options:
+ # 1) only text, in case text must be a list of str
+ # 2) text + text_pair, in which case text = str and text_pair a list of str
+ batched_input = [(text, text_pair)] if text_pair else [text]
+ batched_xpaths = [xpaths]
+ batched_node_labels = [node_labels] if node_labels is not None else None
+ batched_output = self._batch_encode_plus(
+ batched_input,
+ is_pair=bool(text_pair is not None),
+ xpaths=batched_xpaths,
+ node_labels=batched_node_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ # If return_tensors is None, we can remove the leading batch axis.
+ # Overflowing tokens are returned as a batch of output so we keep them in this case
+ if return_tensors is None and not return_overflowing_tokens:
+ batched_output = BatchEncoding(
+ {
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
+ for key, value in batched_output.items()
+ },
+ batched_output.encodings,
+ )
+
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
+
+ return batched_output
+
+ def _pad(
+ self,
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+ max_length: Optional[int] = None,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ ) -> dict:
+ """
+ Args:
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+ encoded_inputs:
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+ max_length: maximum length of the returned list and optionally padding length (see below).
+ Will truncate by taking into account the special tokens.
+ padding_strategy: PaddingStrategy to use for padding.
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
+ The tokenizer padding sides are defined in self.padding_side:
+ - 'left': pads on the left of the sequences
+ - 'right': pads on the right of the sequences
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta).
+ return_attention_mask:
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+ """
+ # Load from model defaults
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = len(required_input)
+
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+ # Initialize attention mask if not present.
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
+
+ if needs_to_be_padded:
+ difference = max_length - len(required_input)
+ if self.padding_side == "right":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = (
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
+ )
+ if "xpath_tags_seq" in encoded_inputs:
+ encoded_inputs["xpath_tags_seq"] = (
+ encoded_inputs["xpath_tags_seq"] + [self.pad_xpath_tags_seq] * difference
+ )
+ if "xpath_subs_seq" in encoded_inputs:
+ encoded_inputs["xpath_subs_seq"] = (
+ encoded_inputs["xpath_subs_seq"] + [self.pad_xpath_subs_seq] * difference
+ )
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
+ elif self.padding_side == "left":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
+ "token_type_ids"
+ ]
+ if "xpath_tags_seq" in encoded_inputs:
+ encoded_inputs["xpath_tags_seq"] = [self.pad_xpath_tags_seq] * difference + encoded_inputs[
+ "xpath_tags_seq"
+ ]
+ if "xpath_subs_seq" in encoded_inputs:
+ encoded_inputs["xpath_subs_seq"] = [self.pad_xpath_subs_seq] * difference + encoded_inputs[
+ "xpath_subs_seq"
+ ]
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+ else:
+ raise ValueError("Invalid padding side: " + str(self.padding_side))
+
+ return encoded_inputs
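For readers skimming the diff, the padding logic above is spread across several branches. The standalone sketch below (not part of the patch; the pad id and inputs are made-up values) reproduces just the right-padding path for a single example, including the `pad_to_multiple_of` round-up.

```python
# Illustrative sketch of _pad's right-padding path (assumed values, not taken
# from the patch): round max_length up to a multiple of pad_to_multiple_of,
# then extend the attention mask with 0s and the input ids with the pad id.
PAD_ID = 1  # hypothetical pad_token_id

encoded = {"input_ids": [0, 5, 6, 2], "attention_mask": [1, 1, 1, 1]}
max_length, pad_to_multiple_of = 6, 4

# Same round-up the method performs: 6 -> 8.
if max_length % pad_to_multiple_of != 0:
    max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

difference = max_length - len(encoded["input_ids"])
encoded["attention_mask"] += [0] * difference  # padded positions are masked out
encoded["input_ids"] += [PAD_ID] * difference  # padded positions get the pad id

print(encoded["input_ids"])       # [0, 5, 6, 2, 1, 1, 1, 1]
print(encoded["attention_mask"])  # [1, 1, 1, 1, 0, 0, 0, 0]
```

Left padding mirrors this by prepending the pad values instead, and the extra keys handled above (`token_type_ids`, `xpath_tags_seq`, `xpath_subs_seq`, `labels`, `special_tokens_mask`) are padded in the same direction with their own dedicated pad values.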
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+ and adding special tokens. A RoBERTa-style sequence has the following format:
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
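A minimal sketch of the token layout this method produces, using assumed RoBERTa-style ids `cls_token_id=0` and `sep_token_id=2` (the concrete values depend on the loaded vocabulary):

```python
# Minimal sketch of build_inputs_with_special_tokens; CLS/SEP ids are assumed.
from typing import List, Optional

CLS, SEP = 0, 2  # assumed cls_token_id / sep_token_id

def build_inputs(ids_0: List[int], ids_1: Optional[List[int]] = None) -> List[int]:
    if ids_1 is None:
        return [CLS] + ids_0 + [SEP]
    return [CLS] + ids_0 + [SEP] + ids_1 + [SEP]

print(build_inputs([10, 11]))            # [0, 10, 11, 2]
print(build_inputs([10, 11], [20, 21]))  # [0, 10, 11, 2, 20, 21, 2]
```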
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0]
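Because the mask is all zeros, only its length matters: one zero per position of the final sequence, counting the added special tokens. The sketch below (same assumed special-token layout as above) just mirrors that arithmetic.

```python
# Sketch of create_token_type_ids_from_sequences: one zero per final position,
# counting the added cls/sep tokens (layout assumed as in the sketch above).
from typing import List, Optional

def token_type_ids(ids_0: List[int], ids_1: Optional[List[int]] = None) -> List[int]:
    if ids_1 is None:
        return [0] * (len(ids_0) + 2)           # cls + ids_0 + sep
    return [0] * (len(ids_0) + len(ids_1) + 3)  # cls + ids_0 + sep + ids_1 + sep

print(token_type_ids([10, 11]))            # [0, 0, 0, 0]
print(token_type_ids([10, 11], [20, 21]))  # [0, 0, 0, 0, 0, 0, 0]
```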
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/venv/lib/python3.10/site-packages/transformers/tools/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/tools/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3230a25b23183f5d3984f09bf065af209dcab37b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/tools/__pycache__/base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/tools/__pycache__/text_question_answering.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/tools/__pycache__/text_question_answering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8876ac14e5872e862ea711dc2e10593835acdf1a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/tools/__pycache__/text_question_answering.cpython-310.pyc differ