diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d036045e2f2156e12e33f8602dba5f0ebcaac008
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/canine/__init__.py
@@ -0,0 +1,69 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
+ "tokenization_canine": ["CanineTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_canine"] = [
+ "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "CanineForMultipleChoice",
+ "CanineForQuestionAnswering",
+ "CanineForSequenceClassification",
+ "CanineForTokenClassification",
+ "CanineLayer",
+ "CanineModel",
+ "CaninePreTrainedModel",
+ "load_tf_weights_in_canine",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
+ from .tokenization_canine import CanineTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_canine import (
+ CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
+ CanineForMultipleChoice,
+ CanineForQuestionAnswering,
+ CanineForSequenceClassification,
+ CanineForTokenClassification,
+ CanineLayer,
+ CanineModel,
+ CaninePreTrainedModel,
+ load_tf_weights_in_canine,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
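The `_LazyModule` registration above keeps importing this package cheap: the CANINE submodules are only imported when one of their names is first accessed, and the torch-gated symbols are registered only if `is_torch_available()` returns `True`. A minimal sketch of that behaviour, assuming `transformers` (and optionally `torch`) is installed:

```python
# Sketch only: demonstrates the lazy-import pattern used by this __init__.
import importlib

canine = importlib.import_module("transformers.models.canine")

# At this point no heavy submodule has been loaded yet; attribute access
# makes _LazyModule import the relevant file on demand.
CanineConfig = canine.CanineConfig        # triggers import of configuration_canine
CanineTokenizer = canine.CanineTokenizer  # triggers import of tokenization_canine

# Torch-gated names such as CanineModel only resolve when torch is installed.
print(hasattr(canine, "CanineModel"))
```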
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d50050d039687c7360d42e52edd583bd844a77a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,66 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert CANINE checkpoint."""
+
+
+import argparse
+
+from transformers import CanineConfig, CanineModel, CanineTokenizer, load_tf_weights_in_canine
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path):
+ # Initialize PyTorch model
+ config = CanineConfig()
+ model = CanineModel(config)
+ model.eval()
+
+ print(f"Building PyTorch model from configuration: {config}")
+
+ # Load weights from tf checkpoint
+ load_tf_weights_in_canine(model, config, tf_checkpoint_path)
+
+ # Save pytorch-model (weights and configuration)
+ print(f"Save PyTorch model to {pytorch_dump_path}")
+ model.save_pretrained(pytorch_dump_path)
+
+ # Save tokenizer files
+ tokenizer = CanineTokenizer()
+ print(f"Save tokenizer files to {pytorch_dump_path}")
+ tokenizer.save_pretrained(pytorch_dump_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--tf_checkpoint_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the TensorFlow checkpoint. Should end with model.ckpt",
+ )
+ parser.add_argument(
+ "--pytorch_dump_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to a folder where the PyTorch model will be placed.",
+ )
+ args = parser.parse_args()
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.pytorch_dump_path)
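For reference, the converter above can also be called programmatically; the paths below are placeholders, and TensorFlow must be installed for `load_tf_weights_in_canine` to read the checkpoint. A hedged sketch of the round-trip:

```python
# Hypothetical paths, shown only to illustrate the conversion round-trip.
from transformers import CanineModel, CanineTokenizer
from transformers.models.canine.convert_canine_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="canine_tf/model.ckpt",  # placeholder checkpoint prefix
    pytorch_dump_path="canine-converted",
)

# The dump folder now contains the model weights, config.json and tokenizer
# files, so it can be reloaded with the usual from_pretrained API.
model = CanineModel.from_pretrained("canine-converted")
tokenizer = CanineTokenizer.from_pretrained("canine-converted")
```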
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/canine/tokenization_canine.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/canine/tokenization_canine.py
new file mode 100644
index 0000000000000000000000000000000000000000..024507f77877d73729928ae1e04cf0087cedb259
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/canine/tokenization_canine.py
@@ -0,0 +1,241 @@
+# coding=utf-8
+# Copyright Google AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for CANINE."""
+
+from typing import Dict, List, Optional
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+# Unicode defines 1,114,112 total “codepoints”
+UNICODE_VOCAB_SIZE = 1114112
+
+# Below: Constants defining canonical codepoints for special, pseudo-characters.
+# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
+PAD = 0
+CLS = 0xE000
+SEP = 0xE001
+BOS = 0xE002
+MASK = 0xE003
+RESERVED = 0xE004
+
+# Maps special codepoints to human-readable names.
+SPECIAL_CODEPOINTS: Dict[int, str] = {
+ # Special symbols are represented using codepoints values that are valid,
+ # but designated as "Private Use", meaning that they will never be assigned
+ # characters by the Unicode Consortium, and are thus safe for use here.
+ #
+ # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
+ # excluded and should fail with a hard error.
+ CLS: "[CLS]",
+ SEP: "[SEP]",
+ BOS: "[BOS]",
+ MASK: "[MASK]",
+ PAD: "[PAD]",
+ RESERVED: "[RESERVED]",
+}
+
+# Maps special codepoint human-readable names to their codepoint values.
+SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
+
+
+class CanineTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then
+ converts each character into its Unicode code point.
+
+ [`CanineTokenizer`] inherits from [`PreTrainedTokenizer`].
+
+ Refer to superclass [`PreTrainedTokenizer`] for usage examples and documentation concerning parameters.
+
+ Args:
+ model_max_length (`int`, *optional*, defaults to 2048):
+ The maximum sentence length the model accepts.
+ """
+
+ def __init__(
+ self,
+ bos_token=chr(CLS),
+ eos_token=chr(SEP),
+ sep_token=chr(SEP),
+ cls_token=chr(CLS),
+ pad_token=chr(PAD),
+ mask_token=chr(MASK),
+ add_prefix_space=False,
+ model_max_length=2048,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+
+ # Mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ # Creates a mapping for looking up the IDs of special symbols.
+ self._special_codepoints: Dict[str, int] = {}
+ for codepoint, name in SPECIAL_CODEPOINTS.items():
+ self._special_codepoints[name] = codepoint
+
+ # Creates a mapping for looking up the string forms of special symbol IDs.
+ self._special_codepoint_strings: Dict[int, str] = {
+ codepoint: name for name, codepoint in self._special_codepoints.items()
+ }
+
+ self._unicode_vocab_size = UNICODE_VOCAB_SIZE
+ self._num_special_tokens = len(self._special_codepoints)
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ model_max_length=model_max_length,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self) -> int:
+ return self._unicode_vocab_size
+
+ def get_vocab(self):
+ vocab = {chr(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Tokenize a string (i.e. perform character splitting)."""
+ return list(text)
+
+ def _convert_token_to_id(self, token: str) -> int:
+ """Converts a token (i.e. a Unicode character) into an id (i.e. its integer Unicode code point value)."""
+ try:
+ return ord(token)
+ except TypeError:
+ raise ValueError(f"invalid token: '{token}'")
+
+ def _convert_id_to_token(self, index: int) -> str:
+ """
+ Converts a Unicode code point (integer) into a token (str). If it is a special code point, it is converted to
+ its human-readable format.
+ """
+ try:
+ if index in SPECIAL_CODEPOINTS:
+ return SPECIAL_CODEPOINTS[index]
+ return chr(index)
+ except TypeError:
+ raise ValueError(f"invalid id: {index}")
+
+ def convert_tokens_to_string(self, tokens):
+ return "".join(tokens)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A CANINE sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ result = cls + token_ids_0 + sep
+ if token_ids_1 is not None:
+ result += token_ids_1 + sep
+ return result
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ result = [1] + ([0] * len(token_ids_0)) + [1]
+ if token_ids_1 is not None:
+ result += ([0] * len(token_ids_1)) + [1]
+ return result
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A CANINE
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ result = len(cls + token_ids_0 + sep) * [0]
+ if token_ids_1 is not None:
+ result += len(token_ids_1 + sep) * [1]
+ return result
+
+ # CanineTokenizer has no vocab file
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
+ return ()
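A short usage sketch of the tokenizer above; the expected ids follow directly from the constants at the top of the file (`CLS` = 0xE000 = 57344, `SEP` = 0xE001 = 57345), with every character encoded as its Unicode code point:

```python
from transformers import CanineTokenizer

# No vocab file is needed: the "vocabulary" is the Unicode code space itself.
tokenizer = CanineTokenizer()

encoding = tokenizer("hi")
print(encoding["input_ids"])  # [57344, 104, 105, 57345] -> [CLS], 'h', 'i', [SEP]

# Special code points map back to their human-readable names.
print(tokenizer._convert_id_to_token(0xE000))    # '[CLS]'
print(tokenizer._convert_id_to_token(ord("h")))  # 'h'
```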
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..65aba047469da14c6b25523fba31432e823ec47d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig"],
+ "tokenization_fsmt": ["FSMTTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_fsmt"] = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"]
+
+
+if TYPE_CHECKING:
+ from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig
+ from .tokenization_fsmt import FSMTTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9cf1badbfe097b6e3214420df7445de427131001
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40252af8063a38dfa932fe95256453d2293a578e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/convert_fsmt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/convert_fsmt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c697d4d6c18d626ff5dbde7da5a9b5d721c5044
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/convert_fsmt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82acef85706590a965784d0f7aac42e9ff7b59f0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ceb64218c110ac7620aa80b84425112c58af5475
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py
new file mode 100644
index 0000000000000000000000000000000000000000..68abe47c019abaae981eb0beedcdbb7c755dff2e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py
@@ -0,0 +1,219 @@
+# coding=utf-8
+# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" FSMT configuration"""
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class DecoderConfig(PretrainedConfig):
+ r"""
+ Configuration class for FSMT's decoder-specific parameters. Note: this is a private helper class.
+ """
+
+ model_type = "fsmt_decoder"
+
+ def __init__(self, vocab_size=0, bos_token_id=0):
+ super().__init__()
+ self.vocab_size = vocab_size
+ self.bos_token_id = bos_token_id
+
+
+class FSMTConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`FSMTModel`]. It is used to instantiate a FSMT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the FSMT
+ [facebook/wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ langs (`List[str]`):
+ A list with the source language and the target language (e.g., `['en', 'ru']`).
+ src_vocab_size (`int`):
+ Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the
+ `input_ids` passed to the forward method in the encoder.
+ tgt_vocab_size (`int`):
+ Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the
+ `input_ids` passed to the forward method in the decoder.
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
+ activation_function (`str` or `Callable`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ scale_embedding (`bool`, *optional*, defaults to `True`):
+ Scale embeddings by multiplying by sqrt(d_model).
+ bos_token_id (`int`, *optional*, defaults to 0):
+ Beginning of stream token id.
+ pad_token_id (`int`, *optional*, defaults to 1):
+ Padding token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+ decoder_start_token_id (`int`, *optional*):
+ This model starts decoding with `eos_token_id`.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details.
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether this is an encoder/decoder model.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether to tie input and output embeddings.
+ num_beams (`int`, *optional*, defaults to 5):
+ Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
+ no beam search.
+ length_penalty (`float`, *optional*, defaults to 1.0):
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
+ `length_penalty` < 0.0 encourages shorter sequences.
+ early_stopping (`bool`, *optional*, defaults to `False`):
+ Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
+ when at least `num_beams` sentences are finished per batch or not.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+ `eos_token_id`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import FSMTConfig, FSMTModel
+
+ >>> # Initializing a FSMT facebook/wmt19-en-ru style configuration
+ >>> config = FSMTConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = FSMTModel(config)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "fsmt"
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ # update the defaults from config file
+ def __init__(
+ self,
+ langs=["en", "de"],
+ src_vocab_size=42024,
+ tgt_vocab_size=42024,
+ activation_function="relu",
+ d_model=1024,
+ max_length=200,
+ max_position_embeddings=1024,
+ encoder_ffn_dim=4096,
+ encoder_layers=12,
+ encoder_attention_heads=16,
+ encoder_layerdrop=0.0,
+ decoder_ffn_dim=4096,
+ decoder_layers=12,
+ decoder_attention_heads=16,
+ decoder_layerdrop=0.0,
+ attention_dropout=0.0,
+ dropout=0.1,
+ activation_dropout=0.0,
+ init_std=0.02,
+ decoder_start_token_id=2,
+ is_encoder_decoder=True,
+ scale_embedding=True,
+ tie_word_embeddings=False,
+ num_beams=5,
+ length_penalty=1.0,
+ early_stopping=False,
+ use_cache=True,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ forced_eos_token_id=2,
+ **common_kwargs,
+ ):
+ self.langs = langs
+ self.src_vocab_size = src_vocab_size
+ self.tgt_vocab_size = tgt_vocab_size
+ self.d_model = d_model # encoder_embed_dim and decoder_embed_dim
+
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = self.num_hidden_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.max_position_embeddings = max_position_embeddings
+ self.init_std = init_std # Normal(0, this parameter)
+ self.activation_function = activation_function
+
+ self.decoder = DecoderConfig(vocab_size=tgt_vocab_size, bos_token_id=eos_token_id)
+ if "decoder" in common_kwargs:
+ del common_kwargs["decoder"]
+
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+
+ # 3 Types of Dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.dropout = dropout
+
+ self.use_cache = use_cache
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ decoder_start_token_id=decoder_start_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ tie_word_embeddings=tie_word_embeddings,
+ forced_eos_token_id=forced_eos_token_id,
+ max_length=max_length,
+ num_beams=num_beams,
+ length_penalty=length_penalty,
+ early_stopping=early_stopping,
+ **common_kwargs,
+ )
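A small sketch of how this configuration composes; the vocabulary sizes below are arbitrary illustrative values, not those of a released checkpoint:

```python
from transformers import FSMTConfig

config = FSMTConfig(langs=["en", "ru"], src_vocab_size=32000, tgt_vocab_size=32000)

# Unlike BART, source and target vocabularies are kept separate.
print(config.src_vocab_size, config.tgt_vocab_size)

# The private DecoderConfig mirrors the target vocab size, and the decoder
# starts generating from the eos token (its bos_token_id == eos_token_id == 2).
print(config.decoder.vocab_size, config.decoder.bos_token_id)

# attribute_map exposes BART-style aliases for common attribute names.
print(config.hidden_size == config.d_model)
print(config.num_attention_heads == config.encoder_attention_heads)
```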
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef2764f0ed10bace714f42f5f74ea6d9a147c613
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,280 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Note: if you intend to run this script make sure you look under scripts/fsmt/
+# to locate the appropriate script to do the work correctly. There is a set of scripts to:
+# - download and prepare data and run the conversion script
+# - perform eval to get the best hparam into the config
+# - generate model_cards - useful if you have multiple models from the same paper
+
+import argparse
+import json
+import os
+import re
+from collections import OrderedDict
+from os.path import basename, dirname
+
+import fairseq
+import torch
+from fairseq import hub_utils
+from fairseq.data.dictionary import Dictionary
+
+from transformers import FSMTConfig, FSMTForConditionalGeneration
+from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
+from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
+from transformers.utils import WEIGHTS_NAME, logging
+
+
+logging.set_verbosity_warning()
+
+json_indent = 2
+
+# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
+# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
+#
+# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
+# * `early_stopping`: `False` consistently scored better
+# * `length_penalty` varied, so will assign the best one depending on the model
+best_score_hparams = {
+ # fairseq:
+ "wmt19-ru-en": {"length_penalty": 1.1},
+ "wmt19-en-ru": {"length_penalty": 1.15},
+ "wmt19-en-de": {"length_penalty": 1.0},
+ "wmt19-de-en": {"length_penalty": 1.1},
+ # allenai:
+ "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
+ "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
+ "wmt16-en-de-12-1": {"length_penalty": 0.8},
+ "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
+ "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
+}
+
+# this remaps the different models to their organization names
+org_names = {}
+for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
+ org_names[m] = "facebook"
+for m in [
+ "wmt16-en-de-dist-12-1",
+ "wmt16-en-de-dist-6-1",
+ "wmt16-en-de-12-1",
+ "wmt19-de-en-6-6-base",
+ "wmt19-de-en-6-6-big",
+]:
+ org_names[m] = "allenai"
+
+
+def rewrite_dict_keys(d):
+ # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
+ # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
+ d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
+ keep_keys = "<s> <pad> </s> <unk>".split()
+ # restore the special tokens
+ for k in keep_keys:
+ del d2[f"{k}</w>"]
+ d2[k] = d[k] # restore
+ return d2
+
+
+def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
+ # prep
+ assert os.path.exists(fsmt_checkpoint_path)
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+ print(f"Writing results to {pytorch_dump_folder_path}")
+
+ # handle various types of models
+
+ checkpoint_file = basename(fsmt_checkpoint_path)
+ fsmt_folder_path = dirname(fsmt_checkpoint_path)
+
+ cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
+ models = cls.hub_models()
+ kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
+ data_name_or_path = "."
+ # note: since the model dump is old, fairseq has upgraded its model some
+ # time later, and it does a whole lot of rewrites and splits on the saved
+ # weights, therefore we can't use torch.load() directly on the model file.
+ # see: upgrade_state_dict(state_dict) in fairseq_model.py
+ print(f"using checkpoint {checkpoint_file}")
+ chkpt = hub_utils.from_pretrained(
+ fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
+ )
+
+ args = vars(chkpt["args"]["model"])
+
+ src_lang = args["source_lang"]
+ tgt_lang = args["target_lang"]
+
+ data_root = dirname(pytorch_dump_folder_path)
+ model_dir = basename(pytorch_dump_folder_path)
+
+ # dicts
+ src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
+ tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")
+
+ src_dict = Dictionary.load(src_dict_file)
+ src_vocab = rewrite_dict_keys(src_dict.indices)
+ src_vocab_size = len(src_vocab)
+ src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
+ print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
+ with open(src_vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
+
+ # detect whether this is a do_lower_case situation, which can be derived by checking whether we
+ # have at least one uppercase letter in the source vocab
+ do_lower_case = True
+ for k in src_vocab.keys():
+ if not k.islower():
+ do_lower_case = False
+ break
+
+ tgt_dict = Dictionary.load(tgt_dict_file)
+ tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
+ tgt_vocab_size = len(tgt_vocab)
+ tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
+ print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
+ with open(tgt_vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
+
+ # merges_file (bpecodes)
+ merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
+ for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
+ fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
+ if os.path.exists(fsmt_merges_file):
+ break
+ with open(fsmt_merges_file, encoding="utf-8") as fin:
+ merges = fin.read()
+ merges = re.sub(r" \d+$", "", merges, 0, re.M) # remove frequency number
+ print(f"Generating {merges_file}")
+ with open(merges_file, "w", encoding="utf-8") as fout:
+ fout.write(merges)
+
+ # model config
+ fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
+
+ # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
+ # may have to modify the tokenizer if a different type is used by a future model
+ assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
+ assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"
+
+ model_conf = {
+ "architectures": ["FSMTForConditionalGeneration"],
+ "model_type": "fsmt",
+ "activation_dropout": args["activation_dropout"],
+ "activation_function": "relu",
+ "attention_dropout": args["attention_dropout"],
+ "d_model": args["decoder_embed_dim"],
+ "dropout": args["dropout"],
+ "init_std": 0.02,
+ "max_position_embeddings": args["max_source_positions"],
+ "num_hidden_layers": args["encoder_layers"],
+ "src_vocab_size": src_vocab_size,
+ "tgt_vocab_size": tgt_vocab_size,
+ "langs": [src_lang, tgt_lang],
+ "encoder_attention_heads": args["encoder_attention_heads"],
+ "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
+ "encoder_layerdrop": args["encoder_layerdrop"],
+ "encoder_layers": args["encoder_layers"],
+ "decoder_attention_heads": args["decoder_attention_heads"],
+ "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
+ "decoder_layerdrop": args["decoder_layerdrop"],
+ "decoder_layers": args["decoder_layers"],
+ "bos_token_id": 0,
+ "pad_token_id": 1,
+ "eos_token_id": 2,
+ "is_encoder_decoder": True,
+ "scale_embedding": not args["no_scale_embedding"],
+ "tie_word_embeddings": args["share_all_embeddings"],
+ }
+
+ # good hparam defaults to start with
+ model_conf["num_beams"] = 5
+ model_conf["early_stopping"] = False
+ if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
+ model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
+ else:
+ model_conf["length_penalty"] = 1.0
+
+ print(f"Generating {fsmt_model_config_file}")
+ with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
+
+ # tokenizer config
+ fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
+
+ tokenizer_conf = {
+ "langs": [src_lang, tgt_lang],
+ "model_max_length": 1024,
+ "do_lower_case": do_lower_case,
+ }
+
+ print(f"Generating {fsmt_tokenizer_config_file}")
+ with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
+
+ # model
+ model = chkpt["models"][0]
+ model_state_dict = model.state_dict()
+
+ # rename keys to start with 'model.'
+ model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
+
+ # remove unneeded keys
+ ignore_keys = [
+ "model.model",
+ "model.encoder.version",
+ "model.decoder.version",
+ "model.encoder_embed_tokens.weight",
+ "model.decoder_embed_tokens.weight",
+ "model.encoder.embed_positions._float_tensor",
+ "model.decoder.embed_positions._float_tensor",
+ ]
+ for k in ignore_keys:
+ model_state_dict.pop(k, None)
+
+ config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
+ model_new = FSMTForConditionalGeneration(config)
+
+ # check that it loads ok
+ model_new.load_state_dict(model_state_dict, strict=False)
+
+ # save
+ pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
+ print(f"Generating {pytorch_weights_dump_path}")
+ torch.save(model_state_dict, pytorch_weights_dump_path)
+
+ print("Conversion is done!")
+ print("\nLast step is to upload the files to s3")
+ print(f"cd {data_root}")
+ print(f"transformers-cli upload {model_dir}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--fsmt_checkpoint_path",
+ default=None,
+ type=str,
+ required=True,
+ help=(
+ "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
+ " bpecodes, etc."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
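The conversion requires `fairseq` to be installed and a local checkpoint dump that also contains the source/target dictionaries and the BPE codes; the paths below are hypothetical. A sketch of a programmatic invocation:

```python
# Hypothetical paths; the directory holding the checkpoint must also contain
# dict.<src>.txt, dict.<tgt>.txt and a bpecodes (or "code") file.
from transformers.models.fsmt.convert_fsmt_original_pytorch_checkpoint_to_pytorch import (
    convert_fsmt_checkpoint_to_pytorch,
)

convert_fsmt_checkpoint_to_pytorch(
    fsmt_checkpoint_path="wmt19.ru-en.ensemble/model4.pt",
    pytorch_dump_folder_path="data/wmt19-ru-en",
)

# data/wmt19-ru-en now holds vocab-src.json, vocab-tgt.json, the merges file,
# config.json, tokenizer_config.json and the PyTorch weights file.
```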
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c180c52678b82b80c5a1aa43f42292556c8f1e4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py
@@ -0,0 +1,1386 @@
+# coding=utf-8
+# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Original implementation: https://github.com/pytorch/fairseq/tree/master/examples/wmt19
+# Authors:
+# - @alexeib Alexei Baevski
+# - @edunov Sergey Edunov
+# - @michaelauli Michael Auli
+# - @myleott Myle Ott
+# - @nng555 Nathan Ng
+# - David Grangier
+# - Kyra Yee
+#
+# Paper: Facebook FAIR's WMT19 News Translation Task Submission https://arxiv.org/abs/1907.06616
+#
+"""PyTorch Fairseq model, ported from https://github.com/pytorch/fairseq/tree/master/examples/wmt19"""
+
+import math
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+from torch import Tensor, nn
+from torch.nn import CrossEntropyLoss, LayerNorm
+
+from ...activations import ACT2FN
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_fsmt import FSMTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/wmt19-ru-en"
+_CONFIG_FOR_DOC = "FSMTConfig"
+
+# See all FSMT models at https://huggingface.co/models?filter=fsmt
+
+# Porting notes:
+# this one is modeled after BartModel*
+#
+# Currently only translation (fairseq also has weights for LM)
+#
+# fairseq provides weights for ru-en, en-ru and de-en, en-de pairs. All have been ported.
+# - ru-en, en-ru use asymmetric vocab
+# - de-en, en-de use a merged single vocab (but the code works as if they are separate)
+#
+# Differences with Bart:
+# - not using bos token
+# - 2 separate vocabs (src and target)
+# - embed weights aren't tied
+# - uses a model Ensemble (but that part isn't ported/implemented yet) - so we
+# aren't getting as good of a BLEU score
+# - uses a projection layer at the end of the decoder
+# - doesn't use final_logits_bias
+# - beam search: stops as soon as num_beams == len(hypos) (whereas transformers
+# is not satisfied there and will continue searching until the next cycles
+# aren't promising something better), comparing BLEU scores - the transformers
+# algorithm is slightly superior, therefore using the latter. But if you want
+# to match fairseq outputs, you need to pass ``early_stopping=True`` to ``generate()``.
+#
+# SinusoidalPositionalEmbedding is slightly different from Bart's - generates
+# different embeddings. This implementation is copied verbatim from fairseq with
+# some small changes to make it work here.
+#
+# Other changes:
+# - doesn't support use_cache as Bart's version does
+#
+#
+# FSMTConfig changes with BartConfig
+#
+# Differences with BART:
+# - src/tgt vocabs aren't shared
+# - token embeddings aren't shared
+# - needs a language pair
+# - scale_embedding is True
+#
+# some unused args were removed too
+#
+#
+# TODO:
+# - port model ensemble (fs uses 4 model checkpoints)
+# - solve beam search discrepancies
+# docstyle-ignore
+
+"""
+
+Here is how to compare BLEU scores against fairseq implementation:
+
+# en-ru
+
+export PAIR=en-ru
+export DATA_DIR=data/$PAIR
+export SAVE_DIR=data/$PAIR
+export BS=8
+export NUM_BEAMS=50
+mkdir -p $DATA_DIR
+sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
+sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
+echo $PAIR
+PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
+
+# (fairseq BLEU: 36.4 http://matrix.statmt.org/matrix/output/1914?score_id=37605)
+
+
+# ru-en
+
+export PAIR=ru-en
+export DATA_DIR=data/$PAIR
+export SAVE_DIR=data/$PAIR
+export BS=8
+export NUM_BEAMS=50
+mkdir -p $DATA_DIR
+sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
+sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
+PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
+
+
+# (fairseq BLEU: 41.3 http://matrix.statmt.org/matrix/output/1907?run_id=6937)
+
+
+# de-en
+
+export PAIR=de-en
+export DATA_DIR=data/$PAIR
+export SAVE_DIR=data/$PAIR
+export BS=8
+export NUM_BEAMS=50
+mkdir -p $DATA_DIR
+sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
+sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
+echo $PAIR
+PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
+
+# (fairseq BLEU: 42.3 http://matrix.statmt.org/matrix/output/1902?run_id=6750)
+
+
+
+# en-de
+
+export PAIR=en-de
+export DATA_DIR=data/$PAIR
+export SAVE_DIR=data/$PAIR
+export BS=8
+export NUM_BEAMS=50
+mkdir -p $DATA_DIR
+sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
+sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
+echo $PAIR
+PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
+
+# (fairseq BLEU: 43.1 http://matrix.statmt.org/matrix/output/1909?run_id=6862)
+
+"""
+
+
+FSMT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`FSMTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+
+"""
+FSMT_GENERATION_EXAMPLE = r"""
+ Translation example::
+
+ ```python
+ >>> from transformers import AutoTokenizer, FSMTForConditionalGeneration
+
+ >>> mname = "facebook/wmt19-ru-en"
+ >>> model = FSMTForConditionalGeneration.from_pretrained(mname)
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
+
+ >>> src_text = "Машинное обучение - это здорово, не так ли?"
+ >>> input_ids = tokenizer(src_text, return_tensors="pt").input_ids
+ >>> outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3)
+ >>> tokenizer.decode(outputs[0], skip_special_tokens=True)
+ "Machine learning is great, isn't it?"
+ ```
+
+"""
+
+FSMT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`FSMTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ FSMT uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`Tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states at
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`Tuple(torch.FloatTensor)` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def invert_mask(attention_mask):
+ """Turns 1->0, 0->1, False->True, True->False"""
+ assert attention_mask.dim() == 2
+ return attention_mask.eq(0)
+
+
+def triu_onnx(x, diagonal=0):
+ l = x.shape[0]
+ arange = torch.arange(l, device=x.device)
+ mask = arange.expand(l, l)
+ arange = arange.unsqueeze(-1)
+ if diagonal:
+ arange = arange + diagonal
+ mask = mask >= arange
+ return x.masked_fill(mask == 0, 0)
+
+
+def _prepare_fsmt_decoder_inputs(
+ config,
+ input_ids,
+ decoder_input_ids=None,
+ decoder_padding_mask=None,
+ causal_mask_dtype=torch.float32,
+):
+ """
+ Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided.
+ This mimics the default behavior in fairseq. To override it pass in masks. Note: this is not called during
+ generation
+ """
+ pad_token_id = config.pad_token_id
+ if decoder_input_ids is None:
+ decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)
+ bsz, tgt_len = decoder_input_ids.size()
+ if decoder_padding_mask is None:
+ decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)
+ else:
+ decoder_padding_mask = invert_mask(decoder_padding_mask)
+ causal_mask = triu_onnx(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len, dtype=causal_mask_dtype)), 1).to(
+ device=decoder_input_ids.device
+ )
+ return decoder_input_ids, decoder_padding_mask, causal_mask
+
+
+class PretrainedFSMTModel(PreTrainedModel):
+ config_class = FSMTConfig
+ base_model_prefix = "model"
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, SinusoidalPositionalEmbedding):
+ pass
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ @property
+ def dummy_inputs(self):
+ pad_token = self.config.pad_token_id
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
+ dummy_inputs = {
+ "attention_mask": input_ids.ne(pad_token),
+ "input_ids": input_ids,
+ }
+ return dummy_inputs
+
+
+def _make_linear_from_emb(emb):
+ vocab_size, emb_size = emb.weight.shape
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
+ lin_layer.weight.data = emb.weight.data
+ return lin_layer
+
+
+# Helper Functions, mostly for making masks
+def _check_shapes(shape_1, shape2):
+ if shape_1 != shape2:
+ raise AssertionError(f"shape mismatch: {shape_1} != {shape2}")
+
+
+def shift_tokens_right(input_ids, pad_token_id):
+ """Shift input ids one token to the right, and wrap the last non pad token (usually <eos>)."""
+
+ # replace possible -100 values in labels by `pad_token_id`
+ input_ids.masked_fill_(input_ids == -100, pad_token_id)
+
+ prev_output_tokens = input_ids.clone()
+ index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
+ prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
+ prev_output_tokens[:, 1:] = input_ids[:, :-1]
+ return prev_output_tokens
+
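A quick worked example for `shift_tokens_right` above, assuming the default `pad_token_id=1` and `eos_token_id=2`: the wrapped eos token lands in position 0 and everything else shifts right by one, which is what FSMT feeds the decoder as `decoder_input_ids`:

```python
import torch
from transformers.models.fsmt.modeling_fsmt import shift_tokens_right

# One sequence: tokens [5, 6, 7], then eos (2), then one pad (1).
input_ids = torch.tensor([[5, 6, 7, 2, 1]])

# Note: the helper modifies its input in place (masked_fill_), hence the clone().
print(shift_tokens_right(input_ids.clone(), pad_token_id=1))
# tensor([[2, 5, 6, 7, 2]])  -> decoder input starts with the wrapped eos token
```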
+
+def make_padding_mask(input_ids, padding_idx=1):
+ """True for pad tokens"""
+ padding_mask = input_ids.eq(padding_idx)
+ if not padding_mask.any():
+ padding_mask = None
+ return padding_mask
+
+
+# Helper Modules
+
+
+class EncoderLayer(nn.Module):
+ def __init__(self, config: FSMTConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout)
+ self.self_attn_layer_norm = LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = LayerNorm(self.embed_dim)
+
+ def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False):
+ """
+ Args:
+ x (`torch.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
+ encoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of shape
+ *(batch, src_len)*, where padding elements are indicated by `1`. A value of `1`
+ means the corresponding source position is excluded from (masked out of)
+ attention, while `0` means it is included.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ *(config.encoder_attention_heads,)*.
+
+ Returns:
+ encoded output of shape *(seq_len, batch, embed_dim)*
+ """
+ residual = x
+ x, attn_weights = self.self_attn(
+ query=x,
+ key=x,
+ key_padding_mask=encoder_padding_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.self_attn_layer_norm(x)
+
+ residual = x
+ x = self.activation_fn(self.fc1(x))
+ x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
+ x = self.fc2(x)
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.final_layer_norm(x)
+ return x, attn_weights
+
+
+class FSMTEncoder(nn.Module):
+ """
+    Transformer encoder consisting of *config.encoder_layers* self-attention layers. Each layer is an [`EncoderLayer`].
+
+ Args:
+ config: FSMTConfig
+ """
+
+ def __init__(self, config: FSMTConfig, embed_tokens):
+ super().__init__()
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+ self.padding_idx = embed_tokens.padding_idx
+ self.embed_tokens = embed_tokens
+ embed_dim = embed_tokens.embedding_dim
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+ self.embed_positions = SinusoidalPositionalEmbedding(
+ config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
+ )
+ self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)]) # type: List[EncoderLayer]
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: torch.Tensor = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ """
+ Args:
+ input_ids (`torch.LongTensor`): tokens in the source language of shape
+ *(batch, src_len)*
+ attention_mask (`torch.LongTensor`): indicating which indices are padding tokens
+ inputs_embeds (`torch.FloatTensor`):
+ embedding vectors of shape *(batch, src_len, embed_dim)*
+ head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ Returns:
+ BaseModelOutput or Tuple comprised of:
+
+ - **x** (`torch.Tensor`): the last encoder layer's output of shape *(src_len, batch, embed_dim)*
+            - **encoder_states** (`Tuple(torch.FloatTensor)`): all intermediate hidden states of shape *(src_len,
+              batch, embed_dim)*. Only populated if *output_hidden_states* is True.
+            - **all_attentions** (`Tuple(torch.FloatTensor)`): Attention weights for each layer.
+              During training it might not be of length n_layers because of layer dropout.
+ """
+ # check attention mask and invert
+ if attention_mask is not None:
+ attention_mask = invert_mask(attention_mask)
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+ embed_pos = self.embed_positions(input_ids)
+ elif inputs_embeds is not None:
+ inputs_embeds = inputs_embeds * self.embed_scale
+
+ # We assume zeros hidden states correspond to padding tokens
+ # and create `position_ids` where inputs_embeds[:, :, 0] == 0
+ position_ids = inputs_embeds[:, :, 0].masked_fill(
+ inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx
+ )
+
+ embed_pos = self.embed_positions(position_ids)
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ x = inputs_embeds + embed_pos
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+
+ # B x T x C -> T x B x C
+ x = x.transpose(0, 1)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ assert head_mask.size()[0] == (
+ len(self.layers)
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ x = x.transpose(0, 1) # T x B x C -> B x T x C
+ encoder_states += (x,)
+ x = x.transpose(0, 1) # B x T x C -> T x B x C
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+ if self.training and (dropout_probability < self.layerdrop): # skip the layer
+ attn = None
+ else:
+ x, attn = encoder_layer(
+ x,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ all_attentions = all_attentions + (attn,)
+
+ # T x B x C -> B x T x C
+ x = x.transpose(0, 1)
+
+ if output_hidden_states:
+ encoder_states += (x,)
+
+ if not return_dict:
+ return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)
+
+
+class DecoderLayer(nn.Module):
+ def __init__(self, config: FSMTConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = Attention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = LayerNorm(self.embed_dim)
+ self.encoder_attn = Attention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ encoder_decoder_attention=True,
+ )
+ self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ x,
+ encoder_hidden_states,
+ encoder_attn_mask=None,
+ layer_state=None,
+ causal_mask=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ decoder_padding_mask=None,
+ output_attentions=False,
+ ):
+ residual = x
+
+ if layer_state is None:
+ layer_state = {}
+
+ # Self Attention
+ x, self_attn_weights = self.self_attn(
+ query=x,
+ key=x,
+ layer_state=layer_state, # adds keys to layer state
+ key_padding_mask=decoder_padding_mask,
+ attn_mask=causal_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.self_attn_layer_norm(x)
+
+ # Cross attention
+ residual = x
+ assert self.encoder_attn.cache_key != self.self_attn.cache_key
+ x, cross_attn_weights = self.encoder_attn(
+ query=x,
+ key=encoder_hidden_states,
+ key_padding_mask=encoder_attn_mask,
+ layer_state=layer_state, # mutates layer state
+ layer_head_mask=cross_attn_layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.encoder_attn_layer_norm(x)
+
+ # Fully Connected
+ residual = x
+ x = self.activation_fn(self.fc1(x))
+ x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
+ x = self.fc2(x)
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.final_layer_norm(x)
+ return (
+ x,
+ self_attn_weights,
+ layer_state,
+ cross_attn_weights,
+ ) # layer_state = cache for decoding
+
+
+class FSMTDecoder(nn.Module):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DecoderLayer`]
+
+ Args:
+ config: FSMTConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding):
+ super().__init__()
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = embed_tokens.padding_idx
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+ self.embed_tokens = embed_tokens
+ embed_dim = embed_tokens.embedding_dim
+ self.embed_positions = SinusoidalPositionalEmbedding(
+ config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
+ )
+ self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.decoder_layers)]) # type: List[DecoderLayer]
+
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(self.embed_tokens.weight, modifier_rank=None):
+ embed_tokens_weight_shape = self.embed_tokens.weight.shape
+ else:
+ embed_tokens_weight_shape = self.embed_tokens.weight.shape
+ self.output_projection = nn.Linear(embed_tokens_weight_shape[1], embed_tokens_weight_shape[0], bias=False)
+ self.output_projection.weight = self.embed_tokens.weight
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ encoder_hidden_states: torch.Tensor,
+ encoder_padding_mask: torch.Tensor,
+ decoder_padding_mask: torch.Tensor,
+ decoder_causal_mask: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ """
+ Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al.,
+ EMNLP 2019).
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch, tgt_len)`):
+ previous decoder outputs for teacher forcing
+ encoder_hidden_states: output from the encoder, used for
+ encoder-side attention
+ encoder_padding_mask: for ignoring pad tokens
+ past_key_values (dict or None): dictionary used for storing state during generation
+ head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ Returns:
+            BaseModelOutputWithPastAndCrossAttentions or tuple:
+
+ - the decoder's features of shape *(batch, tgt_len, embed_dim)*
+ - the cache
+ - hidden states
+ - attentions
+ """
+ # check attention mask and invert
+ if encoder_padding_mask is not None:
+ encoder_padding_mask = invert_mask(encoder_padding_mask)
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ # embed positions
+ positions = self.embed_positions(input_ids)
+ if use_cache:
+ input_ids = input_ids[:, -1:]
+ positions = positions[:, -1:] # happens after we embed them
+ x = self.embed_tokens(input_ids) * self.embed_scale
+ elif inputs_embeds is not None:
+ # We assume zeros hidden states correspond to padding tokens
+ # and create `position_ids` where inputs_embeds[:, :, 0] == 0
+ position_ids = inputs_embeds[:, :, 0].masked_fill(
+ inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx
+ )
+ positions = self.embed_positions(position_ids)
+ x = inputs_embeds * self.embed_scale
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ x += positions
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+
+ # Convert to FSMT output format: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim)
+ x = x.transpose(0, 1)
+ encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attns = () if output_attentions else None
+ next_decoder_cache = []
+
+ # check if head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ assert attn_mask.size()[0] == (len(self.layers)), (
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ x = x.transpose(0, 1)
+ all_hidden_states += (x,)
+ x = x.transpose(0, 1)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ layer_state = past_key_values[idx] if past_key_values is not None else None
+
+ x, layer_self_attn, layer_past, layer_cross_attn = decoder_layer(
+ x,
+ encoder_hidden_states,
+ encoder_attn_mask=encoder_padding_mask,
+ decoder_padding_mask=decoder_padding_mask,
+ layer_state=layer_state,
+ causal_mask=decoder_causal_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ if use_cache:
+ next_decoder_cache.append(layer_past.copy())
+
+ if output_attentions:
+ all_self_attns += (layer_self_attn,)
+ all_cross_attns += (layer_cross_attn,)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ x = x.transpose(0, 1)
+ all_hidden_states += (x,)
+ x = x.transpose(0, 1)
+
+ # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
+ x = x.transpose(0, 1)
+ encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
+
+ x = self.output_projection(x)
+
+ next_cache = next_decoder_cache if use_cache else None
+
+ if not return_dict:
+ return tuple(
+ v for v in [x, next_cache, all_hidden_states, all_self_attns, all_cross_attns] if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=x,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attns,
+ )
+
+
+def _reorder_buffer(attn_cache, new_order):
+ for k, input_buffer_k in attn_cache.items():
+ if input_buffer_k is not None:
+ attn_cache[k] = input_buffer_k.index_select(0, new_order)
+ return attn_cache
+
+
+class Attention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim,
+ num_heads,
+ dropout=0.0,
+ bias=True,
+ encoder_decoder_attention=False, # otherwise self_attention
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
+ self.scaling = self.head_dim**-0.5
+
+ self.encoder_decoder_attention = encoder_decoder_attention
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"
+
+ def _shape(self, tensor, seq_len, bsz):
+ return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
+
+ def forward(
+ self,
+ query,
+ key: Optional[Tensor],
+ key_padding_mask: Optional[Tensor] = None,
+ layer_state: Optional[Dict[str, Optional[Tensor]]] = None,
+ attn_mask: Optional[Tensor] = None,
+ layer_head_mask: Optional[Tensor] = None,
+ output_attentions=False,
+ ) -> Tuple[Tensor, Optional[Tensor]]:
+ """Input shape: Time(SeqLen) x Batch x Channel"""
+ static_kv: bool = self.encoder_decoder_attention
+ tgt_len, bsz, embed_dim = query.size()
+ assert embed_dim == self.embed_dim
+ assert list(query.size()) == [tgt_len, bsz, embed_dim]
+        # the cached branch below is reached for encoder-decoder attention because of static_kv
+ if layer_state is not None: # reuse k,v and encoder_padding_mask
+ saved_state = layer_state.get(self.cache_key, {})
+ if "prev_key" in saved_state and static_kv:
+ # previous time steps are cached - no need to recompute key and value if they are static
+ key = None
+ else:
+ saved_state = None
+ layer_state = {}
+
+ q = self.q_proj(query) * self.scaling
+ if static_kv:
+ if key is None:
+ k = v = None
+ else:
+ k = self.k_proj(key)
+ v = self.v_proj(key)
+ else:
+ k = self.k_proj(query)
+ v = self.v_proj(query)
+
+ q = self._shape(q, tgt_len, bsz)
+ if k is not None:
+ k = self._shape(k, -1, bsz)
+ if v is not None:
+ v = self._shape(v, -1, bsz)
+
+ if saved_state is not None:
+ k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz)
+
+ # Update cache
+ layer_state[self.cache_key] = {
+ "prev_key": k.view(bsz, self.num_heads, -1, self.head_dim),
+ "prev_value": v.view(bsz, self.num_heads, -1, self.head_dim),
+ "prev_key_padding_mask": key_padding_mask if not static_kv else None,
+ }
+
+ assert k is not None
+ src_len = k.size(1)
+ attn_weights = torch.bmm(q, k.transpose(1, 2))
+ assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)
+
+ if attn_mask is not None:
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ # This is part of a workaround to get around fork/join parallelism not supporting Optional types.
+ if key_padding_mask is not None and key_padding_mask.dim() == 0:
+ key_padding_mask = None
+ assert key_padding_mask is None or key_padding_mask.size()[:2] == (
+ bsz,
+ src_len,
+ )
+
+ if key_padding_mask is not None: # don't attend to padding symbols
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
+ attn_weights = attn_weights.masked_fill(reshaped, torch.finfo(attn_weights.dtype).min)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (
+ self.num_heads,
+ ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # make sure that attn_weights are included in graph
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(
+ attn_weights,
+ p=self.dropout,
+ training=self.training,
+ )
+
+ assert v is not None
+ attn_output = torch.bmm(attn_probs, v)
+ assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
+ def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz):
+ # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
+ if "prev_key" in saved_state:
+ _prev_key = saved_state["prev_key"]
+ assert _prev_key is not None
+ prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
+ if static_kv:
+ k = prev_key
+ else:
+ assert k is not None
+ k = torch.cat([prev_key, k], dim=1)
+ if "prev_value" in saved_state:
+ _prev_value = saved_state["prev_value"]
+ assert _prev_value is not None
+ prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
+ if static_kv:
+ v = prev_value
+ else:
+ assert v is not None
+ v = torch.cat([prev_value, v], dim=1)
+ assert k is not None and v is not None
+ prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None)
+ if prev_key_padding_mask is not None:
+ if static_kv:
+ new_key_padding_mask = prev_key_padding_mask
+ else:
+ new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1)
+ else:
+ new_key_padding_mask = key_padding_mask
+ return k, v, new_key_padding_mask
+
+
+def fill_with_neg_inf(t):
+ """FP16-compatible function that fills a input_ids with -inf."""
+ return t.float().fill_(torch.finfo(t.dtype).min).type_as(t)
+
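+# For instance, fill_with_neg_inf(torch.zeros(2, 2, dtype=torch.float16)) returns a
+# half-precision tensor filled with torch.finfo(torch.float16).min (-65504.0), the
+# closest fp16-safe stand-in for -inf.
+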
+
+# Public API
+def _get_shape(t):
+ return getattr(t, "shape", None)
+
+
+@add_start_docstrings(
+ "The bare FSMT Model outputting raw hidden-states without any specific head on top.",
+ FSMT_START_DOCSTRING,
+)
+class FSMTModel(PretrainedFSMTModel):
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]
+
+ def __init__(self, config: FSMTConfig):
+ super().__init__(config)
+
+ padding_idx = config.pad_token_id
+ encoder_embed_tokens = nn.Embedding(config.src_vocab_size, config.d_model, padding_idx)
+ decoder_embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, padding_idx)
+
+ self.encoder = FSMTEncoder(config, encoder_embed_tokens)
+ self.decoder = FSMTDecoder(config, decoder_embed_tokens)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings())
+ self._tie_or_clone_weights(self.decoder.output_projection, self.get_input_embeddings())
+
+ @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=Seq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
+ if decoder_input_ids is None:
+ use_cache = False
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # make masks if user doesn't supply
+ if not use_cache and input_ids is not None:
+ decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_fsmt_decoder_inputs(
+ self.config,
+ input_ids,
+ decoder_input_ids=decoder_input_ids,
+ decoder_padding_mask=decoder_attention_mask,
+ causal_mask_dtype=self.decoder.embed_tokens.weight.dtype,
+ )
+ else:
+ decoder_padding_mask, causal_mask = None, None
+
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ raise ValueError("Make sure that `decoder_input_ids` or `decoder_inputs_embeds` are passed.")
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ decoder_input_ids,
+ encoder_outputs[0],
+ attention_mask,
+ decoder_padding_mask,
+ decoder_causal_mask=causal_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.encoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.encoder.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.decoder.embed_tokens
+
+ def set_output_embeddings(self, value):
+ self.decoder.embed_tokens = value
+
+
+@add_start_docstrings(
+ "The FSMT Model with a language modeling head. Can be used for summarization.", FSMT_START_DOCSTRING
+)
+class FSMTForConditionalGeneration(PretrainedFSMTModel):
+ base_model_prefix = "model"
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]
+
+ def __init__(self, config: FSMTConfig):
+ super().__init__(config)
+ base_model = FSMTModel(config)
+ self.model = base_model
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(FSMT_GENERATION_EXAMPLE)
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.model(
+ input_ids,
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = outputs[0]
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ # TODO(SS): do we need to ignore pad tokens in labels?
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.tgt_vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return shift_tokens_right(labels, self.config.pad_token_id)
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = []
+ for layer_past in past_key_values:
+ # get the correct batch idx from decoder layer's batch dim for cross and self-attn
+ layer_past_new = {
+ attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
+ }
+ reordered_past.append(layer_past_new)
+ return reordered_past
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ def get_output_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_output_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+
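+# A hedged usage sketch for the class above (illustrative only; "facebook/wmt19-en-ru"
+# is assumed to be an available checkpoint and beam size 5 is an arbitrary choice):
+#
+#     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
+#
+#     tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
+#     model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-ru")
+#     inputs = tokenizer("Machine learning is great.", return_tensors="pt")
+#     generated = model.generate(**inputs, num_beams=5)
+#     print(tokenizer.decode(generated[0], skip_special_tokens=True))
+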
+class SinusoidalPositionalEmbedding(nn.Embedding):
+ """
+ This module produces sinusoidal positional embeddings of any length.
+
+ We don't want to save the weight of this embedding since it's not trained (deterministic) and it can be huge.
+
+ Padding symbols are ignored.
+
+    These embeddings get automatically extended in forward if more positions are needed.
+ """
+
+ def __init__(self, num_positions, embedding_dim, padding_idx):
+ self.make_weight(num_positions, embedding_dim, padding_idx)
+
+ def make_weight(self, num_positions, embedding_dim, padding_idx):
+ weight = self.get_embedding(num_positions, embedding_dim, padding_idx)
+ if not hasattr(self, "weight"):
+            # in __init__
+ super().__init__(num_positions, embedding_dim, padding_idx, _weight=weight)
+ else:
+ # in forward put the weights on the correct dtype and device of the param
+ weight = weight.to(dtype=self.weight.dtype, device=self.weight.device)
+ self.weight = nn.Parameter(weight)
+ self.weight.detach_()
+ self.weight.requires_grad = False
+
+ @staticmethod
+ def get_embedding(num_embeddings, embedding_dim, padding_idx):
+ """
+ Build sinusoidal embeddings.
+
+ This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
+ "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ if padding_idx is not None:
+ emb[padding_idx, :] = 0
+ return emb
+
+ @staticmethod
+ def make_positions(tensor, padding_idx: int):
+ """
+ Replace non-padding symbols with their position numbers.
+
+ Position numbers begin at padding_idx+1. Padding symbols are ignored.
+ """
+ # The series of casts and type-conversions here are carefully
+ # balanced to both work with ONNX export and XLA. In particular XLA
+ # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
+ # how to handle the dtype kwarg in cumsum.
+ mask = tensor.ne(padding_idx).int()
+ return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
+
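+    # Toy example (assumed ids, padding_idx=1): for tensor([[5, 6, 1]]) the mask is
+    # [1, 1, 0], its cumulative sum is [1, 2, 2], and after re-masking and adding
+    # padding_idx the positions become [[2, 3, 1]], i.e. real tokens are numbered from
+    # padding_idx + 1 while padding positions keep padding_idx.
+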
+ def forward(
+ self,
+ input,
+ incremental_state: Optional[Any] = None,
+ timestep: Optional[Tensor] = None,
+ ):
+ """Input is expected to be of size [bsz x seqlen]."""
+ bsz, seq_len = input.shape[:2]
+ max_pos = self.padding_idx + 1 + seq_len
+ if max_pos > self.weight.size(0):
+ # expand embeddings if needed
+ self.make_weight(max_pos, self.embedding_dim, self.padding_idx)
+ positions = self.make_positions(input, self.padding_idx)
+ return super().forward(positions)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b0be1f8be24987259aaee01d3165aa03c9218a9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py
@@ -0,0 +1,519 @@
+# coding=utf-8
+# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for FSMT."""
+
+
+import json
+import os
+import re
+import unicodedata
+from typing import Dict, List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "src_vocab_file": "vocab-src.json",
+ "tgt_vocab_file": "vocab-tgt.json",
+ "merges_file": "merges.txt",
+}
+
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
+ strings)
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
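+# For example, get_pairs(("l", "o", "w", "</w>")) returns
+# {("l", "o"), ("o", "w"), ("w", "</w>")}: the candidate BPE merges for "low".
+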
+
+def replace_unicode_punct(text):
+ """
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
+ """
+    text = text.replace("，", ",")
+    text = re.sub(r"。\s*", ". ", text)
+    text = text.replace("、", ",")
+    text = text.replace("”", '"')
+    text = text.replace("“", '"')
+    text = text.replace("∶", ":")
+    text = text.replace("：", ":")
+    text = text.replace("？", "?")
+    text = text.replace("《", '"')
+    text = text.replace("》", '"')
+    text = text.replace("）", ")")
+    text = text.replace("！", "!")
+    text = text.replace("（", "(")
+    text = text.replace("；", ";")
+    text = text.replace("１", "1")
+    text = text.replace("」", '"')
+    text = text.replace("「", '"')
+    text = text.replace("０", "0")
+    text = text.replace("３", "3")
+    text = text.replace("２", "2")
+    text = text.replace("５", "5")
+    text = text.replace("６", "6")
+    text = text.replace("９", "9")
+    text = text.replace("７", "7")
+    text = text.replace("８", "8")
+    text = text.replace("４", "4")
+    text = re.sub(r"．\s*", ". ", text)
+    text = text.replace("～", "~")
+    text = text.replace("’", "'")
+    text = text.replace("…", "...")
+    text = text.replace("━", "-")
+    text = text.replace("〈", "<")
+    text = text.replace("〉", ">")
+    text = text.replace("【", "[")
+    text = text.replace("】", "]")
+    text = text.replace("％", "%")
+ return text
+
+
+def remove_non_printing_char(text):
+ """
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
+ """
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat.startswith("C"):
+ continue
+ output.append(char)
+ return "".join(output)
+
+
+# Porting notes:
+# this one is modeled after XLMTokenizer
+#
+# added:
+# - src_vocab_file,
+# - tgt_vocab_file,
+# - langs,
+
+
+class FSMTTokenizer(PreTrainedTokenizer):
+ """
+    Construct a FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
+
+    - Moses preprocessing and tokenization.
+    - Normalizing all input text.
+    - The argument `special_tokens` and the function `set_special_tokens` can be used to add additional symbols (like
+      "__classify__") to a vocabulary.
+    - The argument `langs` defines a pair of languages.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ langs (`List[str]`, *optional*):
+ A list of two languages to translate from and to, for instance `["en", "ru"]`.
+ src_vocab_file (`str`, *optional*):
+ File containing the vocabulary for the source language.
+        tgt_vocab_file (`str`, *optional*):
+ File containing the vocabulary for the target language.
+ merges_file (`str`, *optional*):
+ File containing the merges.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the beginning of
+            sequence. The token used is the `cls_token`.
+
+            </Tip>
+
+        sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ langs=None,
+ src_vocab_file=None,
+ tgt_vocab_file=None,
+ merges_file=None,
+ do_lower_case=False,
+ unk_token="",
+ bos_token="",
+ sep_token="",
+ pad_token="",
+ **kwargs,
+ ):
+ try:
+ import sacremoses
+ except ImportError:
+            raise ImportError(
+                "You need to install sacremoses to use FSMTTokenizer. "
+                "See https://pypi.org/project/sacremoses/ for installation."
+            )
+
+ self.sm = sacremoses
+
+ self.src_vocab_file = src_vocab_file
+ self.tgt_vocab_file = tgt_vocab_file
+ self.merges_file = merges_file
+ self.do_lower_case = do_lower_case
+
+ # cache of sm.MosesPunctNormalizer instance
+ self.cache_moses_punct_normalizer = {}
+ # cache of sm.MosesTokenizer instance
+ self.cache_moses_tokenizer = {}
+ self.cache_moses_detokenizer = {}
+
+ if langs and len(langs) == 2:
+ self.src_lang, self.tgt_lang = langs
+ else:
+ raise ValueError(
+ f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. "
+ "Usually that means that tokenizer can't find a mapping for the given model path "
+ "in PRETRAINED_VOCAB_FILES_MAP, and other maps of this tokenizer."
+ )
+
+ with open(src_vocab_file, encoding="utf-8") as src_vocab_handle:
+ self.encoder = json.load(src_vocab_handle)
+ with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle:
+ tgt_vocab = json.load(tgt_vocab_handle)
+ self.decoder = {v: k for k, v in tgt_vocab.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ merges = merges_handle.read().split("\n")[:-1]
+ merges = [tuple(merge.split()[:2]) for merge in merges]
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {}
+ super().__init__(
+ langs=langs,
+ src_vocab_file=src_vocab_file,
+ tgt_vocab_file=tgt_vocab_file,
+ merges_file=merges_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ **kwargs,
+ )
+
+ # hack override
+ def get_vocab(self) -> Dict[str, int]:
+ return self.get_src_vocab()
+
+ # hack override
+ @property
+ def vocab_size(self) -> int:
+ return self.src_vocab_size
+
+ def moses_punct_norm(self, text, lang):
+ if lang not in self.cache_moses_punct_normalizer:
+ punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
+ self.cache_moses_punct_normalizer[lang] = punct_normalizer
+ return self.cache_moses_punct_normalizer[lang].normalize(text)
+
+ def moses_tokenize(self, text, lang):
+ if lang not in self.cache_moses_tokenizer:
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
+ return self.cache_moses_tokenizer[lang].tokenize(
+ text, aggressive_dash_splits=True, return_str=False, escape=True
+ )
+
+ def moses_detokenize(self, tokens, lang):
+ if lang not in self.cache_moses_detokenizer:
+ moses_detokenizer = self.sm.MosesDetokenizer(lang=lang)
+ self.cache_moses_detokenizer[lang] = moses_detokenizer
+ return self.cache_moses_detokenizer[lang].detokenize(tokens)
+
+ def moses_pipeline(self, text, lang):
+ text = replace_unicode_punct(text)
+ text = self.moses_punct_norm(text, lang)
+ text = remove_non_printing_char(text)
+ return text
+
+ @property
+ def src_vocab_size(self):
+ return len(self.encoder)
+
+ @property
+ def tgt_vocab_size(self):
+ return len(self.decoder)
+
+ def get_src_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def get_tgt_vocab(self):
+ return dict(self.decoder, **self.added_tokens_decoder)
+
+ def bpe(self, token):
+        word = tuple(token[:-1]) + (token[-1] + "</w>",)
+ if token in self.cache:
+ return self.cache[token]
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token + ""
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+        if word == "\n  </w>":
+            word = "\n</w>"
+ self.cache[token] = word
+ return word
+
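+    # Behaviour sketch (the real output depends on the learned merges file): if no merge
+    # in self.bpe_ranks applies to "low", the loop breaks immediately and self.bpe("low")
+    # returns "l o w</w>", i.e. single characters with the end-of-word marker appended.
+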
+ def _tokenize(self, text, lang="en", bypass_tokenizer=False):
+ """
+ Tokenize a string given language code using Moses.
+
+ Details of tokenization:
+
+ - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
+ - Install with `pip install sacremoses`
+
+ Args:
+ - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
+ languages. However, we don't enforce it.
+ - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
+ (bool). If True, we only apply BPE.
+
+ Returns:
+ List of tokens.
+ """
+        # ignore `lang`, which currently isn't explicitly passed in tokenization_utils.py and always results in lang=en
+ # if lang != self.src_lang:
+ # raise ValueError(f"Expected lang={self.src_lang}, but got {lang}")
+ lang = self.src_lang
+
+ if self.do_lower_case:
+ text = text.lower()
+
+ if bypass_tokenizer:
+ text = text.split()
+ else:
+ text = self.moses_pipeline(text, lang=lang)
+ text = self.moses_tokenize(text, lang=lang)
+
+ split_tokens = []
+ for token in text:
+ if token:
+ split_tokens.extend(list(self.bpe(token).split(" ")))
+
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+
+ # remove BPE
+        tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
+ tokens = "".join(tokens).split()
+ # detokenize
+ text = self.moses_detokenize(tokens, self.tgt_lang)
+ return text
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+        adding special tokens. A FAIRSEQ Transformer sequence has the following format:
+
+        - single sequence: `X </s>`
+        - pair of sequences: `A </s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ sep = [self.sep_token_id]
+
+ # no bos used in fairseq
+ if token_ids_1 is None:
+ return token_ids_0 + sep
+ return token_ids_0 + sep + token_ids_1 + sep
+
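+    # Toy example (assumed sep_token_id=2): token_ids_0=[11, 12] gives [11, 12, 2], and
+    # adding token_ids_1=[13] gives [11, 12, 2, 13, 2]; no bos token is prepended.
+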
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+ # no bos used in fairseq
+ if token_ids_1 is not None:
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ
+ Transformer sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+
+ # no bos used in fairseq
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0]
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
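+    # Toy example: with token_ids_0 of length 3 and token_ids_1 of length 2, the result is
+    # [0, 0, 0, 0] + [1, 1, 1] = [0, 0, 0, 0, 1, 1, 1] (each segment counts its trailing sep token).
+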
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+
+ src_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["src_vocab_file"]
+ )
+ tgt_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["tgt_vocab_file"]
+ )
+ merges_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(src_vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ with open(tgt_vocab_file, "w", encoding="utf-8") as f:
+ tgt_vocab = {v: k for k, v in self.decoder.items()}
+ f.write(json.dumps(tgt_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merges_file, "w", encoding="utf-8") as writer:
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return src_vocab_file, tgt_vocab_file, merges_file
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sm"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ try:
+ import sacremoses
+ except ImportError:
+            raise ImportError(
+                "You need to install sacremoses to use FSMTTokenizer. "
+                "See https://pypi.org/project/sacremoses/ for installation."
+            )
+
+ self.sm = sacremoses
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..54037995229f829e961f96670b86066097d69471
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__init__.py
@@ -0,0 +1,45 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
+
+
+_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
+
+
+if TYPE_CHECKING:
+ from .tokenization_herbert import HerbertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_herbert_fast import HerbertTokenizerFast
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9883b94939c2177801bd95ee69334448d42a9c49
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e8a484a6ded2e822d55206d59cd1198757e4061
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b479635f6c773d4b10164cc91d6b184c4dca97d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/__pycache__/tokenization_herbert_fast.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e37922028e7beddf34bebdb7109cdcf0f7b3fb7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert.py
@@ -0,0 +1,644 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import os
+import re
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+}
+
+
+# Copied from transformers.models.xlm.tokenization_xlm.get_pairs
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
+ strings)
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+# Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct
+def replace_unicode_punct(text):
+ """
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
+ """
+    text = text.replace("，", ",")
+    text = re.sub(r"。\s*", ". ", text)
+    text = text.replace("、", ",")
+    text = text.replace("”", '"')
+    text = text.replace("“", '"')
+    text = text.replace("∶", ":")
+    text = text.replace("：", ":")
+    text = text.replace("？", "?")
+    text = text.replace("《", '"')
+    text = text.replace("》", '"')
+    text = text.replace("）", ")")
+    text = text.replace("！", "!")
+    text = text.replace("（", "(")
+    text = text.replace("；", ";")
+    text = text.replace("１", "1")
+    text = text.replace("」", '"')
+    text = text.replace("「", '"')
+    text = text.replace("０", "0")
+    text = text.replace("３", "3")
+    text = text.replace("２", "2")
+    text = text.replace("５", "5")
+    text = text.replace("６", "6")
+    text = text.replace("９", "9")
+    text = text.replace("７", "7")
+    text = text.replace("８", "8")
+    text = text.replace("４", "4")
+    text = re.sub(r"．\s*", ". ", text)
+    text = text.replace("～", "~")
+    text = text.replace("’", "'")
+    text = text.replace("…", "...")
+    text = text.replace("━", "-")
+    text = text.replace("〈", "<")
+    text = text.replace("〉", ">")
+    text = text.replace("【", "[")
+    text = text.replace("】", "]")
+    text = text.replace("％", "%")
+ return text
+
+
+# Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char
+def remove_non_printing_char(text):
+ """
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
+ """
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat.startswith("C"):
+ continue
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*)
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
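+ # Illustrative sketch (editor's note, not part of the upstream file): with the defaults above,
+ # BasicTokenizer lowercases, strips accents and splits punctuation before any BPE is applied.
+ # BasicTokenizer(do_lower_case=True).tokenize("HerBERT, cześć!")
+ # -> ["herbert", ",", "czesc", "!"]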
+
+class HerbertTokenizer(PreTrainedTokenizer):
+ """
+ Construct a BPE tokenizer for HerBERT.
+
+ Peculiarities:
+
+ - uses BERT's pre-tokenizer: BasicTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a
+ punctuation character will be treated separately.
+
+ - Such pretokenized input is BPE subtokenized
+
+ This tokenizer inherits from [`PreTrainedTokenizer`], which contains most of the methods. Users should refer to
+ the superclass for more information regarding methods.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ tokenizer_file=None,
+ cls_token="<s>",
+ unk_token="<unk>",
+ pad_token="<pad>",
+ mask_token="<mask>",
+ sep_token="</s>",
+ bos_token="<s>",
+ do_lowercase_and_remove_accent=False,
+ additional_special_tokens=[
+ "<special0>",
+ "<special1>",
+ "<special2>",
+ "<special3>",
+ "<special4>",
+ "<special5>",
+ "<special6>",
+ "<special7>",
+ "<special8>",
+ "<special9>",
+ ],
+ lang2id=None,
+ id2lang=None,
+ **kwargs,
+ ):
+ try:
+ import sacremoses
+ except ImportError:
+ raise ImportError(
+ "You need to install sacremoses to use HerbertTokenizer. "
+ "See https://pypi.org/project/sacremoses/ for installation."
+ )
+
+ self.sm = sacremoses
+
+ # cache of sm.MosesPunctNormalizer instance
+ self.cache_moses_punct_normalizer = {}
+ # cache of sm.MosesTokenizer instance
+ self.cache_moses_tokenizer = {}
+ self.lang_with_custom_tokenizer = {"zh", "th", "ja"}
+ # True for current supported model (v1.2.0), False for XLM-17 & 100
+ self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
+ self.lang2id = lang2id
+ self.id2lang = id2lang
+ if lang2id is not None and id2lang is not None:
+ assert len(lang2id) == len(id2lang)
+
+ self.ja_word_tokenizer = None
+ self.zh_word_tokenizer = None
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ merges = merges_handle.read().split("\n")[:-1]
+ merges = [tuple(merge.split()[:2]) for merge in merges]
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {}
+
+ super().__init__(
+ unk_token=unk_token,
+ bos_token=bos_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ additional_special_tokens=additional_special_tokens,
+ lang2id=lang2id,
+ id2lang=id2lang,
+ do_lowercase_and_remove_accent=do_lowercase_and_remove_accent,
+ tokenizer_file=None,
+ **kwargs,
+ )
+
+ self.bert_pre_tokenizer = BasicTokenizer(
+ do_lower_case=False,
+ never_split=self.all_special_tokens,
+ tokenize_chinese_chars=False,
+ strip_accents=False,
+ )
+
+ @property
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case
+ def do_lower_case(self):
+ return self.do_lowercase_and_remove_accent
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm
+ def moses_punct_norm(self, text, lang):
+ if lang not in self.cache_moses_punct_normalizer:
+ punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
+ self.cache_moses_punct_normalizer[lang] = punct_normalizer
+ else:
+ punct_normalizer = self.cache_moses_punct_normalizer[lang]
+ return punct_normalizer.normalize(text)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize
+ def moses_tokenize(self, text, lang):
+ if lang not in self.cache_moses_tokenizer:
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
+ else:
+ moses_tokenizer = self.cache_moses_tokenizer[lang]
+ return moses_tokenizer.tokenize(text, return_str=False, escape=False)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline
+ def moses_pipeline(self, text, lang):
+ text = replace_unicode_punct(text)
+ text = self.moses_punct_norm(text, lang)
+ text = remove_non_printing_char(text)
+ return text
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize
+ def ja_tokenize(self, text):
+ if self.ja_word_tokenizer is None:
+ try:
+ import Mykytea
+
+ self.ja_word_tokenizer = Mykytea.Mykytea(
+ f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
+ )
+ except (AttributeError, ImportError):
+ logger.error(
+ "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper"
+ " (https://github.com/chezou/Mykytea-python) with the following steps"
+ )
+ logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea")
+ logger.error("2. autoreconf -i")
+ logger.error("3. ./configure --prefix=$HOME/local")
+ logger.error("4. make && make install")
+ logger.error("5. pip install kytea")
+ raise
+ return list(self.ja_word_tokenizer.getWS(text))
+
+ @property
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size
+ def vocab_size(self):
+ return len(self.encoder)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe
+ def bpe(self, token):
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
+ if token in self.cache:
+ return self.cache[token]
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token + "</w>"
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ if word == "\n  </w>":
+ word = "\n</w>"
+ self.cache[token] = word
+ return word
+
+ def _tokenize(self, text):
+ pre_tokens = self.bert_pre_tokenizer.tokenize(text)
+
+ split_tokens = []
+ for token in pre_tokens:
+ if token:
+ split_tokens.extend(list(self.bpe(token).split(" ")))
+
+ return split_tokens
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = "".join(tokens).replace("</w>", " ").strip()
+ return out_string
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. An XLM sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+
+ """
+ bos = [self.bos_token_id]
+ sep = [self.sep_token_id]
+
+ if token_ids_1 is None:
+ return bos + token_ids_0 + sep
+ return bos + token_ids_0 + sep + token_ids_1 + sep
+
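+ # Illustrative sketch (editor's note, not part of the upstream file), with hypothetical ids
+ # bos_token_id=0 and sep_token_id=2 chosen only for the example:
+ # tokenizer.build_inputs_with_special_tokens([5, 6]) -> [0, 5, 6, 2]
+ # tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8]) -> [0, 5, 6, 2, 7, 8, 2]
+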
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
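+ # Illustrative sketch (editor's note, not part of the upstream file): for token_ids_0=[5, 6]
+ # and token_ids_1=[7, 8], the cls/ids_0/sep span gets 0s and the ids_1/sep span gets 1s:
+ # tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8]) -> [0, 0, 0, 0, 1, 1, 1]
+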
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sm"] = None
+ return state
+
+ # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ try:
+ import sacremoses
+ except ImportError:
+ raise ImportError(
+ "You need to install sacremoses to use XLMTokenizer. "
+ "See https://pypi.org/project/sacremoses/ for installation."
+ )
+
+ self.sm = sacremoses
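+ # End-to-end usage sketch (editor's note, not part of the upstream file). The checkpoint name
+ # below is an assumption used only for illustration; any HerBERT checkpoint that ships a
+ # vocab.json and merges.txt works the same way:
+ # from transformers import HerbertTokenizer
+ # tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")
+ # ids = tokenizer("Kot ma Alę")["input_ids"]  # <s> ... </s>, see build_inputs_with_special_tokens
+ # tokenizer.decode(ids)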
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cd5db58f1b93a0576bdcc1457a416e0f5856315
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/herbert/tokenization_herbert_fast.py
@@ -0,0 +1,158 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Language Team Authors, Allegro.pl, Facebook Inc. and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_herbert import HerbertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
+
+
+class HerbertTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).
+
+ Peculiarities:
+
+ - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
+ a punctuation character will be treated separately.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the methods. Users should refer to
+ superclass for more information regarding methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = HerbertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+ cls_token="<s>",
+ unk_token="<unk>",
+ pad_token="<pad>",
+ mask_token="<mask>",
+ sep_token="</s>",
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ merges_file,
+ tokenizer_file=tokenizer_file,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ sep_token=sep_token,
+ **kwargs,
+ )
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. A HerBERT sequence, like a BERT sequence, has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ if token_ids_1 is None:
+ return cls + token_ids_0 + sep
+
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed, to be used in a sequence-pair classification task. A HerBERT
+ sequence pair mask, like BERT's, has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
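+ # Usage sketch (editor's note, not part of the upstream file): the fast tokenizer mirrors the
+ # slow one above; the special-token layout follows build_inputs_with_special_tokens, e.g.
+ # tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
+ # -> [cls_token_id, 5, 6, sep_token_id, 7, 8, sep_token_id]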
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22db7c013eaf7250f0f822a920716b0e19c6e7d0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/configuration_idefics.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/configuration_idefics.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5daf6806ddace287eac7576fde9e18a763111b9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/configuration_idefics.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/perceiver.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/perceiver.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..795e76d8ade8e1cb9a5e09e8c3e92a13ccbc6b4a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/__pycache__/perceiver.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/configuration_idefics.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/configuration_idefics.py
new file mode 100644
index 0000000000000000000000000000000000000000..07a92432aee3afc95aafe3a4bd5567ec861823af
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/configuration_idefics.py
@@ -0,0 +1,327 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Idefics model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class IdeficsVisionConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
+ Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Idefics-9B.
+
+ e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer. (elsewhere referred to as `hidden_size`)
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ intermediate_size (`int`, *optional*, defaults to 5120):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ patch_size (`int`, *optional*, defaults to 14):
+ The size (resolution) of each patch.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ image_num_channels (`int`, *optional*, defaults to `3`):
+ Number of image channels.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ initializer_factor (`float`, *optional*, defaults to 1.0):
+ A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
+ testing).
+ """
+
+ model_type = "idefics"
+ attribute_map = {
+ "hidden_size": "embed_dim",
+ }
+
+ def __init__(
+ self,
+ embed_dim=768,
+ image_size=224,
+ intermediate_size=5120,
+ patch_size=14,
+ num_hidden_layers=32,
+ num_attention_heads=16,
+ num_channels=3,
+ hidden_act="gelu",
+ layer_norm_eps=1e-5,
+ attention_dropout=0.0,
+ initializer_range=0.02,
+ initializer_factor=1.0,
+ **kwargs,
+ ):
+ self.embed_dim = embed_dim
+ self.image_size = image_size
+ self.intermediate_size = intermediate_size
+ self.patch_size = patch_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.num_channels = num_channels
+ self.layer_norm_eps = layer_norm_eps
+ self.attention_dropout = attention_dropout
+ self.initializer_range = initializer_range
+ self.initializer_factor = initializer_factor
+ self.hidden_act = hidden_act
+
+ super().__init__(**kwargs)
+
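+ # Illustrative sketch (editor's note, not part of the upstream file): because of the
+ # attribute_map defined above, `hidden_size` is an alias for `embed_dim`:
+ # vision_config = IdeficsVisionConfig(embed_dim=768, image_size=224, patch_size=14)
+ # vision_config.hidden_size  # -> 768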
+
+class IdeficsPerceiverConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
+ Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Idefics-9B.
+
+ e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ use_resampler (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the resampler
+ resampler_n_latents (`int`, *optional*, defaults to 64):
+ Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
+ resampler_depth (`int`, *optional*, defaults to 6):
+ Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
+ resampler_n_heads (`int`, *optional*, defaults to 16):
+ Number of heads in each Transformer block (for multi-headed self-attention).
+ resampler_head_dim (`int`, *optional*, defaults to 96):
+ Dimensionality of each head projection in the Transformer block.
+ qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
+ Whether or not to use qk layer norms in perceiver
+ """
+
+ model_type = "idefics"
+
+ def __init__(
+ self,
+ use_resampler=False,
+ resampler_n_latents=64,
+ resampler_depth=6,
+ resampler_n_heads=16,
+ resampler_head_dim=96,
+ qk_layer_norms_perceiver=False,
+ **kwargs,
+ ):
+ self.use_resampler = use_resampler
+ self.resampler_n_latents = resampler_n_latents
+ self.resampler_depth = resampler_depth
+ self.resampler_n_heads = resampler_n_heads
+ self.resampler_head_dim = resampler_head_dim
+ self.qk_layer_norms_perceiver = qk_layer_norms_perceiver
+
+ super().__init__(**kwargs)
+
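+ # Illustrative sketch (editor's note, not part of the upstream file): enabling the resampler
+ # with values that match the __init__ defaults above:
+ # perceiver_config = IdeficsPerceiverConfig(use_resampler=True, resampler_n_latents=64, resampler_depth=6)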
+
+class IdeficsConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
+ Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Idefics-9B.
+
+ e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ additional_vocab_size (`int`, *optional*, defaults to 0):
+ Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
+ are always trainable whereas regular vocab tokens can be frozen or not.
+ vocab_size (`int`, *optional*, defaults to 32000):
+ Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`~IdeficsModel`]
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 11008):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
+ Initialization type for the alphas.
+ alphas_initializer_range (`float`, *optional*, defaults to 0.0):
+ The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
+ Attention.
+ alpha_type (`str`, *optional*, defaults to `"float"`):
+ Whether the gating alphas should be vectors or single floats.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+ tie_word_embeddings(`bool`, *optional*, defaults to `False`):
+ Whether to tie weight embeddings
+ cross_layer_interval (`int`, *optional*, defaults to 1):
+ Interval for cross attention (from text to image) layers.
+ qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
+ freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
+ freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
+ Exceptions to freezing text layers when `freeze_text_layers` is `True`
+ freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
+ freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
+ freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
+ Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
+ use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
+ vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
+ perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict
+
+ Example:
+
+ ```python
+ >>> from transformers import IdeficsModel, IdeficsConfig
+
+ >>> # Initializing a Idefics idefics-9b style configuration
+ >>> configuration = IdeficsConfig()
+
+ >>> # Initializing a model from the idefics-9b style configuration
+ >>> model = IdeficsModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "idefics"
+ is_composition = False
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ additional_vocab_size=0,
+ hidden_size=4096,
+ intermediate_size=11008,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ dropout=0.0,
+ hidden_act="silu",
+ initializer_range=0.02,
+ alpha_initializer="zeros",
+ alphas_initializer_range=0.0,
+ alpha_type="float",
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ tie_word_embeddings=False,
+ cross_layer_interval=1,
+ qk_layer_norms=False,
+ freeze_text_layers=True,
+ freeze_text_module_exceptions=[],
+ freeze_lm_head=False,
+ freeze_vision_layers=True,
+ freeze_vision_module_exceptions=[],
+ use_resampler=False,
+ vision_config=None,
+ perceiver_config=None,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.additional_vocab_size = additional_vocab_size
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.dropout = dropout
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.alpha_initializer = alpha_initializer
+ self.alphas_initializer_range = alphas_initializer_range
+ self.alpha_type = alpha_type
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
+
+ self.cross_layer_interval = cross_layer_interval
+ self.qk_layer_norms = qk_layer_norms
+ self.freeze_vision_layers = freeze_vision_layers
+
+ self.freeze_text_layers = freeze_text_layers
+ self.freeze_text_module_exceptions = freeze_text_module_exceptions
+ self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
+ self.freeze_lm_head = freeze_lm_head
+
+ self.use_resampler = use_resampler
+
+ if perceiver_config is None:
+ self.perceiver_config = IdeficsPerceiverConfig()
+ elif isinstance(perceiver_config, dict):
+ self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config)
+ elif isinstance(perceiver_config, IdeficsPerceiverConfig):
+ self.perceiver_config = perceiver_config
+
+ if vision_config is None:
+ self.vision_config = IdeficsVisionConfig()
+ elif isinstance(vision_config, dict):
+ self.vision_config = IdeficsVisionConfig(**vision_config)
+ elif isinstance(vision_config, IdeficsVisionConfig):
+ self.vision_config = vision_config
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ # IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
+ # PretrainedConfig.from_dict first instantiates the class with the config dict and only then
+ # updates the config object with `kwargs` from from_pretrained, so during the instantiation
+ # of this object many attributes have default values and haven't yet been overridden.
+ # Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.
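+ # Illustrative sketch (editor's note, not part of the upstream file): nested configs may be
+ # passed as dicts and are converted by the branches above:
+ # config = IdeficsConfig(
+ #     vision_config={"embed_dim": 768, "image_size": 224},
+ #     perceiver_config={"use_resampler": True, "resampler_n_latents": 64},
+ # )
+ # isinstance(config.vision_config, IdeficsVisionConfig)        # -> True
+ # isinstance(config.perceiver_config, IdeficsPerceiverConfig)  # -> True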
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/image_processing_idefics.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/image_processing_idefics.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee8dfbb4077c66de280f8ca60506250553ea305e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/image_processing_idefics.py
@@ -0,0 +1,168 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Idefics."""
+
+from typing import Callable, Dict, List, Optional, Union
+
+from PIL import Image
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature
+from ...image_transforms import resize, to_channel_dimension_format
+from ...image_utils import (
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+)
+from ...utils import TensorType, is_torch_available
+
+
+IDEFICS_STANDARD_MEAN = [0.48145466, 0.4578275, 0.40821073]
+IDEFICS_STANDARD_STD = [0.26862954, 0.26130258, 0.27577711]
+
+
+def convert_to_rgb(image):
+ # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
+ # for transparent images. The call to `alpha_composite` handles this case
+ if image.mode == "RGB":
+ return image
+
+ image_rgba = image.convert("RGBA")
+ background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
+ alpha_composite = Image.alpha_composite(background, image_rgba)
+ alpha_composite = alpha_composite.convert("RGB")
+ return alpha_composite
+
+
+class IdeficsImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a Idefics image processor.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ Resize to image size
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ image_num_channels (`int`, *optional*, defaults to 3):
+ Number of image channels.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ image_size: int = 224,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ image_num_channels: Optional[int] = 3,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+
+ self.image_size = image_size
+ self.image_num_channels = image_num_channels
+ self.image_mean = image_mean
+ self.image_std = image_std
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ image_num_channels: Optional[int] = 3,
+ image_size: Optional[Dict[str, int]] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ transform: Callable = None,
+ **kwargs,
+ ) -> TensorType.PYTORCH:
+ """
+ Preprocess a batch of images.
+
+ Args:
+ images (`ImageInput`):
+ A list of images to preprocess.
+ image_size (`int`, *optional*, defaults to `self.image_size`):
+ Resize to image size
+ image_num_channels (`int`, *optional*, defaults to `self.image_num_channels`):
+ Number of image channels.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ transform (`Callable`, *optional*, defaults to `None`):
+ A custom transform function that accepts a single image can be passed for training. For example,
+ `torchvision.Compose` can be used to compose multiple transforms. If `None`, inference mode is
+ assumed and a preset of inference-specific transforms will be applied to the images.
+
+ Returns:
+ a PyTorch tensor of the processed images
+
+ """
+ image_size = image_size if image_size is not None else self.image_size
+ image_num_channels = image_num_channels if image_num_channels is not None else self.image_num_channels
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ size = (image_size, image_size)
+
+ if isinstance(images, list) and len(images) == 0:
+ return []
+
+ images = make_list_of_images(images)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ # For training a user needs to pass their own set of transforms as a Callable.
+ # For reference this is what was used in the original IDEFICS training:
+ # transform = transforms.Compose([
+ # convert_to_rgb,
+ # transforms.RandomResizedCrop((size, size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
+ # transforms.ToTensor(),
+ # transforms.Normalize(mean=image_mean, std=image_std),
+ # ])
+ if transform is not None:
+ if not is_torch_available():
+ raise ImportError("To pass in `transform` torch must be installed")
+ import torch
+
+ images = [transform(x) for x in images]
+ return torch.stack(images)
+
+ # for inference we do the exact transforms that were used to train IDEFICS
+ images = [convert_to_rgb(x) for x in images]
+ # further transforms expect numpy arrays
+ images = [to_numpy_array(x) for x in images]
+ images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images]
+ images = [self.rescale(image=image, scale=1 / 255) for image in images]
+ images = [self.normalize(x, mean=image_mean, std=image_std) for x in images]
+ images = [to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images]
+ # TODO: this converts to torch tensors - switch to convert_to_tensors once it becomes available
+ images = BatchFeature(data={"pixel_values": images}, tensor_type=TensorType.PYTORCH)["pixel_values"]
+
+ return images
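+ # Usage sketch (editor's note, not part of the upstream file): inference-mode preprocessing of
+ # a single PIL image; mean and std are passed explicitly rather than relying on config defaults:
+ # from PIL import Image
+ # processor = IdeficsImageProcessor(image_size=224, image_mean=IDEFICS_STANDARD_MEAN, image_std=IDEFICS_STANDARD_STD)
+ # pixel_values = processor.preprocess([Image.new("RGB", (640, 480))])
+ # pixel_values.shape  # -> torch.Size([1, 3, 224, 224])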
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/modeling_idefics.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/modeling_idefics.py
new file mode 100644
index 0000000000000000000000000000000000000000..a01c2279c15586b86bc86e4a430da58c3e628c53
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/modeling_idefics.py
@@ -0,0 +1,1588 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Idefics model."""
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ... import PreTrainedModel
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask_for_sdpa
+from ...modeling_outputs import ModelOutput
+from ...modeling_utils import PretrainedConfig
+from ...pytorch_utils import ALL_LAYERNORM_LAYERS
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_idefics import IdeficsConfig
+from .perceiver import IdeficsPerceiverResampler
+from .vision import IdeficsVisionTransformer
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "IdeficsConfig"
+
+
+from ..deprecated._archive_maps import IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class IdeficsBaseModelOutputWithPast(ModelOutput):
+ """
+ Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
+ sequence_length, hidden_size)`.
+
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class IdeficsCausalLMOutputWithPast(ModelOutput):
+ """
+ Base class for Idefics causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
+ sequence_length, hidden_size)`.
+
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+def expand_inputs_for_generation(
+ input_ids,
+ expand_size=1,
+ is_encoder_decoder=False,
+ attention_mask=None,
+ encoder_outputs=None,
+ **model_kwargs,
+):
+ expanded_return_idx = (
+ torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device)
+ )
+ input_ids = input_ids.index_select(0, expanded_return_idx)
+ model_kwargs["pixel_values"] = model_kwargs.get("pixel_values", None)
+ model_kwargs["image_encoder_embeddings"] = model_kwargs.get("image_encoder_embeddings", None)
+ model_kwargs["perceiver_embeddings"] = model_kwargs.get("perceiver_embeddings", None)
+ model_kwargs["image_attention_mask"] = model_kwargs.get("image_attention_mask", None)
+
+ if "token_type_ids" in model_kwargs:
+ token_type_ids = model_kwargs["token_type_ids"]
+ model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx)
+
+ if attention_mask is not None:
+ model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx)
+
+ if model_kwargs["image_attention_mask"] is not None:
+ model_kwargs["image_attention_mask"] = model_kwargs["image_attention_mask"].index_select(
+ 0, expanded_return_idx
+ )
+
+ if model_kwargs["pixel_values"] is not None:
+ model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(0, expanded_return_idx)
+
+ elif model_kwargs["image_encoder_embeddings"] is not None:
+ model_kwargs["image_encoder_embeddings"] = model_kwargs["image_encoder_embeddings"].index_select(
+ 0, expanded_return_idx
+ )
+
+ elif model_kwargs["perceiver_embeddings"] is not None:
+ model_kwargs["perceiver_embeddings"] = model_kwargs["perceiver_embeddings"].index_select(
+ 0, expanded_return_idx
+ )
+
+ return input_ids, model_kwargs
+
+
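+# Generation-time helper: when a cache (`past_key_values`) is present, only the last token (and its
+# `token_type_ids`) is kept, and `position_ids` are rebuilt on the fly from the attention mask for batched
+# generation.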
+def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs):
+ token_type_ids = kwargs.get("token_type_ids", None)
+ # only keep the last token for input_ids if past is defined in kwargs
+ if past_key_values:
+ input_ids = input_ids[:, -1].unsqueeze(-1)
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
+
+ attention_mask = kwargs.get("attention_mask", None)
+ position_ids = kwargs.get("position_ids", None)
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -1].unsqueeze(-1)
+
+ pixel_values = kwargs.get("pixel_values", None)
+ image_encoder_embeddings = kwargs.get("image_encoder_embeddings", None)
+ perceiver_embeddings = kwargs.get("perceiver_embeddings", None)
+ image_attention_mask = kwargs.get("image_attention_mask", None)
+ interpolate_pos_encoding = kwargs.get("interpolate_pos_encoding", False)
+
+ return {
+ "input_ids": input_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "position_ids": position_ids,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids,
+ "pixel_values": pixel_values,
+ "image_encoder_embeddings": image_encoder_embeddings,
+ "perceiver_embeddings": perceiver_embeddings,
+ "image_attention_mask": image_attention_mask,
+ "interpolate_pos_encoding": interpolate_pos_encoding,
+ }
+
+
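+# Freezes every module of `model` (requires_grad=False), except modules whose type name appears in
+# `module_exceptions` ("LayerNorm", "Linear" or "Embedding"), which are explicitly kept trainable.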
+def freeze_model(model, module_exceptions=[]):
+ mapping = {
+ "LayerNorm": nn.LayerNorm,
+ "Linear": nn.Linear,
+ "Embedding": nn.Embedding,
+ }
+ module_exceptions_mapped = [mapping[m] for m in module_exceptions]
+ for module in model.modules():
+ if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped):
+ module.requires_grad_(True) # Explicitly setting it to True to avoid any mistakes
+ else:
+ module.requires_grad_(False)
+ return model
+
+
+class IdeficsDecoupledEmbedding(nn.Embedding):
+ # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/sparse.html#Embedding
+ """
+ Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practice, the
+ regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0,
+ then it will create `num_additional_embeddings` additional parameters that are always trained. If
+ `num_additional_embeddings=0`, then the module defaults back to the regular behavior of `nn.Embedding`.
+ """
+
+ def __init__(
+ self,
+ num_embeddings,
+ num_additional_embeddings,
+ embedding_dim,
+ partially_freeze: Optional[bool] = False,
+ device=None,
+ dtype=None,
+ padding_idx=None,
+ **kwargs,
+ ) -> None:
+ """
+ Args:
+ num_embeddings (`int`):
+ Size of the dictionary of embeddings
+ num_additional_embeddings (`int`):
+ Number of additional embeddings. Only useful when `partially_freeze=True`.
+ embedding_dim (`int`):
+ The size of each embedding vector
+ partially_freeze (`bool`, *optional*, defaults to `False`):
+ If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen.
+ padding_idx (`int`, *optional*):
+ The padding index (needs to be less than num_embeddings)
+
+ Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `max_norm` or
+ `norm_type`. We are not supporting these.
+ """
+ if padding_idx is not None and padding_idx > num_embeddings:
+ raise ValueError(f"padding_idx must be within num_embeddings. Got {padding_idx} and {num_embeddings}")
+ super().__init__(
+ num_embeddings=num_embeddings,
+ embedding_dim=embedding_dim,
+ device=device,
+ dtype=dtype,
+ padding_idx=padding_idx,
+ **kwargs,
+ )
+ self.num_embeddings = num_embeddings
+ self.padding_idx = padding_idx
+ self.num_additional_embeddings = num_additional_embeddings
+ self.partially_freeze = partially_freeze
+
+ if partially_freeze:
+ self.weight.requires_grad_(False)
+
+ if self.num_additional_embeddings > 0:
+ self.additional_embedding = nn.Embedding(
+ num_embeddings=self.num_additional_embeddings,
+ embedding_dim=embedding_dim,
+ device=device,
+ dtype=dtype,
+ )
+
+ def forward(self, input_ids):
+ """
+ We have 2 embeddings, with different indices - one pretrained self.weight and another
+ self.additional_embedding.weight that is being trained.
+
+ In order to make a lookup of the input ids, we:
+ 1. find out the indices of the entries belonging to the 2nd embedding
+ 2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd
+ embedding starts from 0 and not num_embeddings
+ 3. perform the 2nd embedding lookup
+ 4. now we handle the 1st embedding: we overwrite indices belonging to the 2nd embedding with a padding index
+ 5. perform the 1st embedding lookup
+ 6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup
+
+ Note: for the 1st embedding lookup we could have looked up only the low indices and skipped the padding, but
+ then we would have to create a new tensor and populate it with 2 tensors spread out across various indices -
+ i.e. not a simple concat. We haven't benchmarked whether the more complex approach is any faster; given that
+ sequence lengths are usually relatively short, it's probably not faster, or not by much - but it might be a
+ good idea to measure.
+
+ """
+ if self.num_additional_embeddings == 0:
+ return F.embedding(input_ids, self.weight)
+
+ # Clone so that we don't modify the original input_ids later on
+ input_ids = input_ids.clone()
+ additional_vocab_indices = torch.where(input_ids >= self.num_embeddings)
+ input_ids_additional_vocab = input_ids[additional_vocab_indices]
+ additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings)
+
+ # for successful lookup replace input_ids with 0, the results of these will be discarded anyway
+ input_ids[additional_vocab_indices] = 0
+ full_vector = F.embedding(input_ids, self.weight)
+
+ # overwrite the records with high indices
+ full_vector[additional_vocab_indices] = additional_embeddings
+
+ return full_vector
+
+ def extra_repr(self) -> str:
+ return "num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, partially_freeze={}".format(
+ self.num_embeddings,
+ self.num_additional_embeddings,
+ self.embedding_dim,
+ self.partially_freeze,
+ )
+
+
+class IdeficsDecoupledLinear(nn.Linear):
+ # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
+ """
+ Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practice, the
+ regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0,
+ then it will create `out_additional_features * in_features` additional parameters that are always trained. If
+ `out_additional_features=0`, then the module defaults back to the regular behavior of `nn.Linear`.
+ """
+
+ def __init__(
+ self,
+ in_features: int,
+ out_features: int,
+ out_additional_features: int = 0,
+ bias: bool = True,
+ partially_freeze: bool = True,
+ device=None,
+ dtype=None,
+ ) -> None:
+ """
+ out_additional_features: int. Number of additional trainable dimensions. Only makes sense when
+ `partially_freeze=True`. partially_freeze: bool. If True, the regular `weight` will be frozen and extra
+ parameters (if any) will be trainable. If False, default to the regular behavior of nn.Linear.
+ """
+ super().__init__(in_features, out_features, bias, device, dtype)
+ self.out_additional_features = out_additional_features
+ self.partially_freeze = partially_freeze
+
+ self.in_features = in_features
+ self.out_features = out_features
+
+ if partially_freeze:
+ self.weight.requires_grad_(False)
+ if bias:
+ self.bias.requires_grad_(False)
+
+ if out_additional_features > 0:
+ self.additional_fc = nn.Linear(
+ in_features=in_features,
+ out_features=out_additional_features,
+ bias=bias,
+ device=device,
+ dtype=dtype,
+ )
+
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
+ output = F.linear(input, self.weight, self.bias)
+
+ if self.out_additional_features > 0:
+ additional_features = self.additional_fc(input)
+ output = torch.cat((output, additional_features), -1)
+
+ return output
+
+ def extra_repr(self) -> str:
+ """Overwriting `nn.Linear.extra_repr` to include new parameters."""
+ return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format(
+ self.in_features,
+ self.out_features,
+ self.out_additional_features,
+ self.bias is not None,
+ self.partially_freeze,
+ )
+
+
+# this was adapted from LlamaRMSNorm
+class IdeficsRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ IdeficsRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
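+ # RMSNorm: scale by the reciprocal root-mean-square of the features, computed in fp32 for stability,
+ # i.e. weight * x / sqrt(mean(x**2, dim=-1) + eps); unlike LayerNorm there is no mean subtraction and no bias.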
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+ # convert into half-precision if necessary
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
+ hidden_states = hidden_states.to(self.weight.dtype)
+
+ return self.weight * hidden_states
+
+
+ALL_LAYERNORM_LAYERS.append(IdeficsRMSNorm)
+
+
+# this was adapted from LlamaRotaryEmbedding
+class IdeficsEmbedding(torch.nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
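+ # RoPE inverse frequencies: inv_freq[i] = 1 / base^(2i / dim) for i = 0, 1, ..., dim/2 - 1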
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+ )
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+ def forward(self, x, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if seq_len > self.max_seq_len_cached:
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+ return (
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
+ )
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`):
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+ used to pass offset position ids when working with a KV-cache.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
+
+# this was adapted from LlamaMLP
+class IdeficsMLP(nn.Module):
+ def __init__(
+ self,
+ hidden_size: int,
+ intermediate_size: int,
+ hidden_act: str,
+ ):
+ super().__init__()
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+ self.act_fn = ACT2FN[hidden_act]
+
+ def forward(self, x):
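+ # Gated (SwiGLU-style) MLP: the activated gate projection multiplies the up projection before down-projecting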
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+
+# this was adapted from LlamaAttention
+class IdeficsAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ hidden_size: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_cross_attention: bool = False,
+ config: PretrainedConfig = None,
+ qk_layer_norms: bool = False,
+ ):
+ super().__init__()
+ self.hidden_size = hidden_size
+ self.num_heads = num_heads
+ self.head_dim = hidden_size // num_heads
+ self.dropout = dropout
+ self.is_causal = True
+
+ if (self.head_dim * num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {num_heads})."
+ )
+
+ self.is_cross_attention = is_cross_attention
+
+ if not hasattr(nn.functional, "scaled_dot_product_attention"):
+ raise ValueError("this model requires pytorch 2.0 or higher")
+
+ if self.is_cross_attention:
+ kv_input_dim = (
+ self.hidden_size if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim
+ )
+ self.q_proj = nn.Linear(
+ self.hidden_size,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ self.k_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False)
+ self.v_proj = nn.Linear(
+ kv_input_dim,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ else:
+ self.q_proj = nn.Linear(
+ self.hidden_size,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ self.k_proj = nn.Linear(
+ self.hidden_size,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ self.v_proj = nn.Linear(
+ self.hidden_size,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ self.o_proj = nn.Linear(
+ num_heads * self.head_dim,
+ hidden_size,
+ bias=False,
+ )
+ self.rotary_emb = IdeficsEmbedding(self.head_dim)
+
+ self.qk_layer_norms = qk_layer_norms
+ if self.qk_layer_norms:
+ self.q_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)
+ self.k_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ is_cross_attention = self.is_cross_attention or key_value_states is not None
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ if not is_cross_attention:
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ else:
+ _, kv_len, _ = key_value_states.size() # Note that, in this case, `kv_len` == `kv_seq_len`
+ key_states = self.k_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
+ value_states = (
+ self.v_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
+ )
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value[0].shape[-2]
+ if not is_cross_attention:
+ cos, sin = self.rotary_emb(value_states, seq_len=max(kv_seq_len, q_len))
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+ # [bsz, nh, t, hd]
+
+ if past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+ past_key_value = (key_states, value_states) if use_cache else None
+
+ if self.qk_layer_norms:
+ query_states = self.q_layer_norm(query_states)
+ key_states = self.k_layer_norm(key_states)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and attention_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=attention_mask,
+ dropout_p=self.dropout,
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
+ )
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ attn_weights = None
+ if output_attentions:
+ logger.warning_once(
+ "attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead"
+ )
+
+ return attn_output, attn_weights, past_key_value
+
+
+# this was adapted from LlamaDecoderLayer
+class IdeficsDecoderLayer(nn.Module):
+ def __init__(self, config: IdeficsConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.self_attn = IdeficsAttention(
+ hidden_size=self.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.dropout,
+ config=config,
+ )
+ self.mlp = IdeficsMLP(
+ hidden_size=self.hidden_size,
+ intermediate_size=config.intermediate_size,
+ hidden_act=config.hidden_act,
+ )
+ self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.dropout = config.dropout
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class IdeficsGatedCrossAttentionLayer(nn.Module):
+ def __init__(self, config: IdeficsConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.cross_attn = IdeficsAttention(
+ hidden_size=self.hidden_size,
+ num_heads=config.num_attention_heads,
+ is_cross_attention=True,
+ dropout=config.dropout,
+ config=config,
+ qk_layer_norms=config.qk_layer_norms,
+ )
+ self.mlp = IdeficsMLP(
+ hidden_size=self.hidden_size,
+ intermediate_size=config.intermediate_size,
+ hidden_act=config.hidden_act,
+ )
+ self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.dropout = config.dropout
+
+ self.act_cross_attn = nn.Tanh()
+ self.act_dense = nn.Tanh()
+
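+ # Learnable gates for the cross-attention and MLP residual branches. Depending on `alpha_type` they are
+ # scalars or per-channel vectors; they are passed through tanh, so a "zeros" init starts both branches fully
+ # closed (pure language-model behavior) and lets them open up during training.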
+ if config.alpha_initializer == "zeros":
+ if config.alpha_type == "vector":
+ self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
+ self.alpha_dense = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
+ elif config.alpha_type == "float":
+ self.alpha_cross_attn = nn.Parameter(torch.zeros(1))
+ self.alpha_dense = nn.Parameter(torch.zeros(1))
+ else:
+ raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
+
+ elif config.alpha_initializer == "ones":
+ if config.alpha_type == "vector":
+ self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, self.hidden_size))
+ self.alpha_dense = nn.Parameter(torch.ones(1, 1, self.hidden_size))
+ elif config.alpha_type == "float":
+ self.alpha_cross_attn = nn.Parameter(torch.ones(1))
+ self.alpha_dense = nn.Parameter(torch.ones(1))
+ else:
+ raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
+
+ elif config.alpha_initializer in {"normal", "gaussian", "random"}:
+ if config.alpha_type == "vector":
+ self.alpha_cross_attn = nn.Parameter(
+ torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))
+ )
+ self.alpha_dense = nn.Parameter(
+ torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))
+ )
+ elif config.alpha_type == "float":
+ self.alpha_cross_attn = nn.Parameter(
+ torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1))
+ )
+ self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1)))
+ else:
+ raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
+
+ else:
+ raise NotImplementedError(f"Alpha initialization scheme {config.alpha_initializer} not yet implemented!")
+
+ if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")):
+ raise ValueError("Alpha parameters not initialized correctly!")
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ image_hidden_states: Optional[torch.Tensor] = None,
+ image_attention_mask: Optional[torch.Tensor] = None,
+ cross_attention_gate: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ image_attention_mask (`torch.FloatTensor`, *optional*): image attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ cross_attention_gate (`torch.FloatTensor`, *optional*):
+ gate of size `(batch, seq_len)` used to zero out the cross-attention output for tokens attending to no images.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+ if image_hidden_states is None:
+ raise ValueError(
+ "`image_hidden_states` is required for Idefics cross attention module which are visual features to be"
+ " conditioned on."
+ )
+
+ if cross_attention_gate is None:
+ raise ValueError(
+ "`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images."
+ )
+
+ if past_key_value is not None:
+ raise NotImplementedError("Past key value states are not implemented for Idefics cross attention module.")
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Cross-Attention
+ hidden_states, self_attn_weights, present_key_value = self.cross_attn(
+ hidden_states=hidden_states,
+ key_value_states=image_hidden_states,
+ attention_mask=image_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ # Fill in zeros for cross_attention hidden_states of tokens attending to no images
+ hidden_states[cross_attention_gate == 0] = hidden_states[cross_attention_gate == 0].fill_(0)
+ hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+LLAMA_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`IdeficsConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
+ LLAMA_START_DOCSTRING,
+)
+class IdeficsPreTrainedModel(PreTrainedModel):
+ config_class = IdeficsConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
+ _supports_sdpa = True
+
+ def _init_weights(self, module):
+ # important: this ported version of Idefics isn't meant for training from scratch - only
+ # inference and fine-tuning - so the proper init weights code has been removed - the m4 code
+ # base should be used for training from scratch and it contains the correct code.
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ # Adapted from transformers.modeling_utils.PreTrainedModel._check_and_enable_sdpa
+ @classmethod
+ def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig:
+ # We remove the checks on `is_torch_sdpa_available()` and `cls._supports_sdpa` as Idefics supports SDPA from torch==2.0.0 (no requirement on 2.1).
+ _is_bettertransformer = getattr(cls, "use_bettertransformer", False)
+ if _is_bettertransformer:
+ return config
+
+ if not hard_check_only:
+ config._attn_implementation = "sdpa"
+ return config
+
+
+LLAMA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
+ LLAMA_START_DOCSTRING,
+)
+class IdeficsModel(IdeficsPreTrainedModel):
+ """
+ Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is an [`IdeficsDecoderLayer`]
+
+ Args:
+ config: IdeficsConfig
+ """
+
+ def __init__(self, config: IdeficsConfig):
+ super().__init__(config)
+ self.config = config
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = IdeficsDecoupledEmbedding(
+ num_embeddings=config.vocab_size,
+ num_additional_embeddings=config.additional_vocab_size,
+ embedding_dim=config.hidden_size,
+ partially_freeze=config.freeze_text_layers,
+ padding_idx=self.padding_idx,
+ )
+
+ self.image_size = config.vision_config.image_size
+ self.vision_config = config.vision_config
+ self.vision_model = IdeficsVisionTransformer(config.vision_config)
+
+ # Perceiver Resampler
+ if config.use_resampler:
+ perceiver_config = config.perceiver_config
+ self.perceiver_resampler = IdeficsPerceiverResampler(
+ config,
+ config.vision_config.embed_dim,
+ perceiver_config.resampler_depth,
+ perceiver_config.resampler_n_heads,
+ perceiver_config.resampler_head_dim,
+ perceiver_config.resampler_n_latents,
+ )
+
+ self.layers = nn.ModuleList([IdeficsDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+
+ self.cross_layer_interval = config.cross_layer_interval
+ num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
+ self.gated_cross_attn_layers = nn.ModuleList(
+ [IdeficsGatedCrossAttentionLayer(config) for _ in range(num_cross_layers)]
+ )
+ self.gradient_checkpointing = False
+
+ self.norm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.freeze_relevant_params(config)
+
+ def freeze_relevant_params(self, config=None):
+ if config is None:
+ config = self.config
+
+ if config.freeze_text_layers:
+ self.freeze_text_layers(config.freeze_text_module_exceptions)
+
+ if config.freeze_vision_layers:
+ freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)
+
+ def freeze_text_layers(self, module_exceptions=[]):
+ for module in [self.layers, self.norm]:
+ freeze_model(module, module_exceptions=module_exceptions)
+
+ def freeze_vision_layers(self, module_exceptions=[]):
+ freeze_model(self.vision_model, module_exceptions=module_exceptions)
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ image_encoder_embeddings: Optional[torch.FloatTensor] = None,
+ perceiver_embeddings: Optional[torch.FloatTensor] = None,
+ image_attention_mask: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, IdeficsBaseModelOutputWithPast]:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ seq_length_with_past = seq_length
+ past_key_values_length = 0
+
+ if past_key_values is not None:
+ past_key_values_length = past_key_values[0][0].shape[2]
+ seq_length_with_past = seq_length_with_past + past_key_values_length
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ elif position_ids is None:
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0)
+
+ if (pixel_values, image_encoder_embeddings, perceiver_embeddings).count(None) != 2:
+ raise ValueError(
+ "Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None."
+ )
+
+ elif pixel_values is not None:
+ pixel_values = pixel_values.to(dtype=self.dtype, device=device) # fp16 compatibility
+ batch_size, num_images = pixel_values.shape[:2]
+ pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:])
+
+ # Get sequence from the vision encoder
+ image_hidden_states = self.vision_model(
+ pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
+ ).last_hidden_state
+
+ elif image_encoder_embeddings is not None:
+ batch_size, num_images, image_seq_len, image_hidden_size = image_encoder_embeddings.size()
+ image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=device)
+ image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size)
+
+ if self.config.use_resampler:
+ if perceiver_embeddings is None:
+ perceiver_embeddings = self.perceiver_resampler(image_hidden_states)
+ image_seq_len, image_hidden_size = perceiver_embeddings.size(1), perceiver_embeddings.size(2)
+ else:
+ batch_size, num_images, image_seq_len, image_hidden_size = perceiver_embeddings.size()
+ image_hidden_states = perceiver_embeddings
+ elif perceiver_embeddings is None:
+ image_seq_len, image_hidden_size = image_hidden_states.size(1), image_hidden_states.size(2)
+ else:
+ raise ValueError("If `perceiver_embeddings` are passed, use_resampler should be True")
+
+ image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size)
+ # # Hack to use the model in full language modeling mode
+ # image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device)
+ # Make image_attention_mask compatible with hidden states
+ text_seq_len = image_attention_mask.size(1)
+ image_attention_mask = image_attention_mask.unsqueeze(-1)
+ image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
+ image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len)
+
+ if image_hidden_states is not None:
+ image_batch_size, image_sequence_length, _ = image_hidden_states.size()
+ image_hidden_shape = (image_batch_size, image_sequence_length)
+ if image_attention_mask is None:
+ image_attention_mask = torch.ones(image_hidden_shape, device=device)
+ image_attention_mask = self.invert_attention_mask(image_attention_mask)
+ else:
+ image_attention_mask = None
+
+ # cross_attention_gate:
+ # For any tokens attending to no images, the hidden_states coming out of the cross-attention should be zeroed-out.
+ # `image_attention_mask` has shape [bsz, 1, num_images, hidden_size] with elements equal to either 0.0 or a very negative number.
+ # If any of the elements are 0.0, then the token is attending to at least one image and the gate value is 1. Otherwise the gate value is 0.
+ # `cross_attention_gate` has shape [bsz, seq_len] with elements equal to either 0.0 or 1.0.
+ cross_attention_gate = ((((image_attention_mask == 0.0).any(dim=-1)).to(dtype=self.dtype)).squeeze(dim=1)).to(
+ device
+ )
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+ # embed positions
+ if attention_mask is None:
+ attention_mask = torch.ones(
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+ )
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+ )
+
+ hidden_states = inputs_embeds
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = () if use_cache else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
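+ # `vblock` bundles the gated cross-attention layer (applied every `cross_layer_interval`-th decoder layer)
+ # with the regular decoder layer, so the pair can be run - and gradient-checkpointed - as a single unit.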
+ def vblock(
+ main_block,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_value,
+ image_hidden_states,
+ image_attention_mask,
+ cross_attention_gate,
+ output_attentions,
+ use_cache,
+ layer_idx,
+ cross_layer_interval,
+ gated_cross_attn_layers,
+ ):
+ # TODO(ls): Add cross attention values to respective lists
+ if layer_idx % cross_layer_interval == 0:
+ xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval]
+ outputs = xblock(
+ hidden_states,
+ attention_mask=attention_mask,
+ image_hidden_states=image_hidden_states,
+ image_attention_mask=image_attention_mask,
+ cross_attention_gate=cross_attention_gate,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ past_key_value=None, # not implemented
+ )
+ hidden_states = outputs[0]
+
+ layer_outputs = main_block(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ return layer_outputs
+
+ if self.gradient_checkpointing and self.training:
+ past_key_value = None
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ layer_outputs = self._gradient_checkpointing_func(
+ vblock,
+ decoder_layer,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_value,
+ image_hidden_states,
+ image_attention_mask,
+ cross_attention_gate,
+ output_attentions,
+ use_cache,
+ idx,
+ self.cross_layer_interval,
+ self.gated_cross_attn_layers,
+ )
+ else:
+ layer_outputs = vblock(
+ decoder_layer,
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ image_hidden_states=image_hidden_states,
+ image_attention_mask=image_attention_mask,
+ cross_attention_gate=cross_attention_gate,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ layer_idx=idx,
+ cross_layer_interval=self.cross_layer_interval,
+ gated_cross_attn_layers=self.gated_cross_attn_layers,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size)
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states]
+ if v is not None
+ )
+ return IdeficsBaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ image_hidden_states=image_hidden_states,
+ )
+
+
+class IdeficsForVisionText2Text(IdeficsPreTrainedModel):
+ _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
+ _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config, vision_model=None):
+ super().__init__(config)
+ self.model = IdeficsModel(config)
+
+ self.lm_head = IdeficsDecoupledLinear(
+ in_features=config.hidden_size,
+ out_features=config.vocab_size,
+ out_additional_features=config.additional_vocab_size,
+ bias=False,
+ partially_freeze=config.freeze_lm_head,
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ def tie_weights(self):
+ """
+ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of
+ IdeficsDecoupledLinear and IdeficsDecoupledEmbedding.
+ """
+ output_embeddings = self.get_output_embeddings()
+ input_embeddings = self.get_input_embeddings()
+
+ if getattr(self.config, "tie_word_embeddings", True):
+ output_embeddings.weight = input_embeddings.weight
+ if input_embeddings.num_additional_embeddings > 0:
+ assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings
+ output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight
+
+ if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
+ output_embeddings.out_features = input_embeddings.num_embeddings
+ if hasattr(output_embeddings, "out_additional_features") and hasattr(
+ input_embeddings, "num_additional_embeddings"
+ ):
+ output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings
+
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=IdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ image_encoder_embeddings: Optional[torch.FloatTensor] = None,
+ perceiver_embeddings: Optional[torch.FloatTensor] = None,
+ image_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, IdeficsCausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoProcessor, IdeficsForVisionText2Text
+
+ >>> model = IdeficsForVisionText2Text.from_pretrained("HuggingFaceM4/idefics-9b")
+ >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics-9b")
+
+ >>> dogs_image_url_1 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg"
+ >>> dogs_image_url_2 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image2.jpeg"
+
+ >>> prompts = [
+ ... [
+ ... "User:",
+ ... dogs_image_url_1,
+ ... "Describe this image.\nAssistant: An image of two dogs.\n",
+ ... "User:",
+ ... dogs_image_url_2,
+ ... "Describe this image.\nAssistant:",
+ ... ]
+ ... ]
+ >>> inputs = processor(prompts, return_tensors="pt")
+ >>> generate_ids = model.generate(**inputs, max_new_tokens=6)
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True)
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ pixel_values=pixel_values,
+ image_encoder_embeddings=image_encoder_embeddings,
+ perceiver_embeddings=perceiver_embeddings,
+ image_attention_mask=image_attention_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ # Shift so that tokens < n predict n
+ if attention_mask is not None:
+ shift_attention_mask = attention_mask[..., 1:].to(logits.device)
+ shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
+ shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous()
+ else:
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return IdeficsCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ image_hidden_states=outputs.image_hidden_states,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
+ image_hidden_states = kwargs.pop("image_hidden_states", None)
+ if image_hidden_states is not None:
+ if self.config.use_resampler:
+ kwargs["perceiver_embeddings"] = image_hidden_states
+ else:
+ kwargs["image_encoder_embeddings"] = image_hidden_states
+ kwargs["pixel_values"] = None
+ inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs)
+ unwanted_kwargs = ["token_type_ids"]
+ for kwarg in unwanted_kwargs:
+ inputs.pop(kwarg, None)
+ return inputs
+
+ @staticmethod
+ def _expand_inputs_for_generation(
+ *args,
+ **model_kwargs,
+ ):
+ return expand_inputs_for_generation(*args, **model_kwargs)
+
+ def _update_model_kwargs_for_generation(
+ self,
+ outputs: ModelOutput,
+ model_kwargs: Dict[str, Any],
+ is_encoder_decoder: bool = False,
+ standardize_cache_format: bool = False,
+ ) -> Dict[str, Any]:
+ model_kwargs = super()._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder,
+ standardize_cache_format,
+ )
+
+ if "image_attention_mask" in model_kwargs:
+ image_attention_mask = model_kwargs["image_attention_mask"]
+ last_mask = image_attention_mask[:, -1, :].unsqueeze(1)
+ model_kwargs["image_attention_mask"] = last_mask
+
+ # Get the precomputed image_hidden_states
+ model_kwargs["image_hidden_states"] = outputs.image_hidden_states
+ return model_kwargs
+
+ @staticmethod
+ def _reorder_cache(past, beam_idx):
+ reordered_past = ()
+ for layer_past in past:
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
+ return reordered_past
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/processing_idefics.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/processing_idefics.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7fd8c8de6555e3e820d807413e5efafd37f8f79
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/processing_idefics.py
@@ -0,0 +1,408 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for IDEFICS.
+"""
+
+from typing import Callable, List, Optional, Union
+from urllib.parse import urlparse
+
+from ...feature_extraction_utils import BatchFeature
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
+from ...utils import TensorType, is_torch_available
+
+
+if is_torch_available():
+ import torch
+
+
+IMAGE_TOKEN = ""
+
+
+# copied from m4.training.packing
+def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1):
+ # This function converts: [-1, 0, 1] => [[0, 0], [1, 0], [0, 1]]
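+ # e.g. with num_classes=2: [-1, 0, 0, 1] => [[0, 0], [1, 0], [1, 0], [0, 1]]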
+
+ # If any image index is greater than or equal to num_classes, set it to -1.
+ # Tokens appearing after the maximum allowed number of images has been seen don't attend to anything
+ if num_classes != -1:
+ incremental_mask[incremental_mask >= num_classes] = -1
+
+ negatives = incremental_mask == -1
+ incremental_mask[negatives] = 0
+ attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes)
+ attn_mask[negatives, :] = 0
+ return attn_mask
+
+
+# copied from m4.training.packing
+def image_attention_mask_for_packed_input_ids(input_ids, tokenizer):
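+ # For every text token, record which image it may attend to: the first (forward) pass tags each token with
+ # the index of the most recently seen image token (-1 if none, or once an EOS has been seen), and the second
+ # (reversed) pass builds the analogous mapping towards the *next* image token.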
+ image_attention_mask = torch.full_like(input_ids, fill_value=-1)
+ next_image_attention_mask = torch.full_like(input_ids, fill_value=-1)
+ image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
+ eod_token_id = tokenizer.eos_token_id
+ for batch_idx in range(input_ids.size(0)):
+ count = -1
+ seen_eod = False
+ for idx, token_id in enumerate(input_ids[batch_idx]):
+ if token_id == image_token_id:
+ count += 1
+ image_attention_mask[batch_idx][idx] = count
+ seen_eod = False
+ else:
+ image_attention_mask[batch_idx][idx] = count
+
+ if seen_eod:
+ image_attention_mask[batch_idx][idx] = -1
+
+ if token_id == eod_token_id:
+ seen_eod = True
+
+ for batch_idx in range(input_ids.size(0)):
+ count = -1
+ seen_eod = False
+ for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1):
+ token_id = input_ids[batch_idx][idx]
+ if token_id == image_token_id:
+ count += 1
+ next_image_attention_mask[batch_idx][idx] = count
+ seen_eod = False
+ else:
+ next_image_attention_mask[batch_idx][idx] = count
+
+ if token_id == eod_token_id:
+ seen_eod = True
+
+ if seen_eod:
+ next_image_attention_mask[batch_idx][idx] = -1
+
+ non_negative_indices = next_image_attention_mask[batch_idx] != -1
+ next_image_attention_mask[batch_idx][non_negative_indices] -= count
+ next_image_attention_mask[batch_idx][non_negative_indices] *= -1
+
+ return image_attention_mask, next_image_attention_mask
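+# Illustrative sketch (added commentary): for a hypothetical packed sequence
+# "<image> A B <image> C", the forward loop above assigns each token the index of the most
+# recently seen image:
+#
+#   token:                 <image>  A   B   <image>  C
+#   image_attention_mask:     0     0   0      1     1
+#
+# Tokens appearing before any image, or after an EOS token, keep the value -1 and are later
+# zeroed out by `incremental_to_binary_attention_mask`. The reversed second loop builds the
+# "next image" variant, which `IdeficsProcessor.__call__` currently discards.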
+
+
+def is_url(string):
+ """Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately
+ invalidated the url"""
+ if " " in string:
+ return False
+ result = urlparse(string)
+ return all([result.scheme, result.netloc])
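+# For example (added commentary): is_url("https://example.com/cat.png") -> True,
+# is_url("not a url") -> False (contains a space), is_url("example.com") -> False (no scheme).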
+
+
+class IdeficsProcessor(ProcessorMixin):
+ r"""
+ Constructs an IDEFICS processor which wraps a LLaMA tokenizer and an IDEFICS image processor into a single processor.
+
+ [`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See
+ the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
+
+ Args:
+ image_processor (`IdeficsImageProcessor`):
+ An instance of [`IdeficsImageProcessor`]. The image processor is a required input.
+ tokenizer (`LlamaTokenizerFast`):
+ An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input.
+ image_size (`int`, *optional*, defaults to 224): Image size (assuming a square image)
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "IdeficsImageProcessor"
+ tokenizer_class = "LlamaTokenizerFast"
+
+ def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs):
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+ self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
+
+ self.default_image_dims = (
+ self.image_processor.image_num_channels,
+ self.image_processor.image_size,
+ self.image_processor.image_size,
+ )
+
+ self.tokenizer_was_trained_with_end_of_utterance_token = (
+ True
+ if "" in self.tokenizer.special_tokens_map.get("additional_special_tokens", [])
+ else False
+ )
+
+ def __call__(
+ self,
+ prompts: Union[List[TextInput], List[List[TextInput]]],
+ padding: Union[bool, str, PaddingStrategy] = "longest",
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ transform: Callable = None,
+ add_eos_token=False,
+ add_end_of_utterance_token=None,
+ debug=False,
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+ ) -> BatchEncoding:
+ """This method takes batched or non-batched prompts made of text and images and converts them into prompts that
+ the model was trained on and prepares the image pixel values for the model to process.
+
+ Args:
+ prompts (`Union[List[TextInput], List[List[TextInput]]]`):
+ either a single prompt or a batched list of prompts - see the detailed description immediately after
+ the end of the arguments doc section.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `"longest"`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding. This will raise an error if the input sequences are of different
+ lengths.
+ Note: Unlike most processors, which set padding=`False` by default, `IdeficsProcessor` sets `padding="longest"`
+ by default. See https://github.com/huggingface/transformers/pull/29449#pullrequestreview-1925576061 for why.
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ truncation (`bool`, *optional*):
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
+ transform (`Callable`, *optional*):
+ A custom transform function that accepts a single image can be passed for training. For example,
+ `torchvision.transforms.Compose` can be used to compose multiple functions. If `None` a preset inference-specific
+ set of transforms will be applied to the images
+ add_eos_token (`bool`, *optional*, defaults to `False`):
+ Adds `eos_token` at the end of the final prompt if `True`.
+ add_end_of_utterance_token (`bool`, *optional*):
+ Whether to automatically add `<end_of_utterance>` after each prompt's text input (unless followed by an
+ image). If `None` the tokenizer will be checked instead and if this token is found in
+ `additional_special_tokens` then the value will be `True`.
+ debug (`bool`, *optional*, defaults to `False`):
+ `True` value will help debug prompt generation by dumping useful information
+ return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`):
+ The type of tensors to return. Can be one of:
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+
+ Returns:
+ a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be
+ directly passed to `model.generate`
+
+ Detailed explanation:
+
+ Each entry in `prompts` is either a text to be passed as is or an image that will be processed.
+
+ An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved.
+
+ When the processor encounters an image it'll inject a `<fake_token_around_image><image><fake_token_around_image>`
+ entry into the prompt.
+
+ Example:
+
+ ```python
+ checkpoint = "HuggingFaceM4/idefics-9b"
+ processor = AutoProcessor.from_pretrained(checkpoint)
+ url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
+ img = processor.image_processor.fetch_images([url])[0]
+
+ prompts = [
+ "User:",
+ img,
+ "Describe this image.\nAssistant: An image of two kittens in grass.\n",
+ "User:",
+ "https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg",
+ "Describe this image.\nAssistant:",
+ ]
+
+ inputs = processor(prompts, return_tensors="pt")
+ generated_ids = model.generate(**inputs, max_length=100)
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ ```
+
+ In this example the `prompts` will be converted into:
+
+ ```
+ <s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
+ Assistant: An image of two kittens in grass.
+ User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
+ Assistant:
+ ```
+
+ and the two images will be massaged using [`IdeficsImageProcessor.__call__`] method and placed inside the
+ `pixel_values` dict entry of the return value.
+
+ This example also exemplifies that images can be passed as objects or as text URLs: here the first image is
+ passed as an object and the second one as a URL.
+
+ To prepare inputs for training, do:
+
+ ```python
+ image_transform = transforms.Compose(
+ [
+ transforms.RandomResizedCrop(
+ (w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC
+ ),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=self.image_mean, std=self.image_std),
+ ]
+ )
+ inputs = processor(prompts, transform=image_transform, return_tensors="pt")
+ ```
+
+ To help debug prompt generation, enable `debug=True`, which will show you what's happening.
+
+ """
+
+ # if the value isn't overridden by the user, check if the tokenizer was trained with this token and then use it
+ if add_end_of_utterance_token is None:
+ add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token
+
+ # turn non-batched prompts into batched
+ if not any(isinstance(i, list) for i in prompts):
+ prompts = [prompts]
+
+ fake_token = "<fake_token_around_image>"
+ image_token = "<image>"
+ end_of_utterance_token = "<end_of_utterance>"
+
+ def image_tokens(last_was_image):
+ if last_was_image:
+ return image_token + fake_token
+ else:
+ return fake_token + image_token + fake_token
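+ # For clarity (added commentary): given the tokens defined above,
+ #   image_tokens(False) == "<fake_token_around_image><image><fake_token_around_image>"
+ #   image_tokens(True)  == "<image><fake_token_around_image>"
+ # so two back-to-back images share a single surrounding fake token.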
+
+ all_prompts = []
+ all_images = []
+ for sample in prompts:
+ # the model was trained on samples starting with <s>
+ full_text = f"{self.tokenizer.bos_token}"
+
+ # an image can either be an image object in the item or the url, everything else is a verbatim prompt text
+ image_objects = []
+ last_was_image = False
+ last_was_text = False
+ for i, item in enumerate(sample):
+ if i > 0:
+ last_was_text = True if not last_was_image else False
+
+ if isinstance(item, str):
+ item = item.strip(" ")
+ if is_url(item):
+ image = self.image_processor.fetch_images(item)
+ full_text += image_tokens(last_was_image)
+ image_objects.append(image)
+ last_was_image = True
+ else:
+ # we add end_of_utterance_token between subsequent text prompts (but not after the last one!)
+ if add_end_of_utterance_token and last_was_text:
+ full_text += end_of_utterance_token
+ full_text += item
+ last_was_image = False
+ else:
+ # must be an image obj
+ full_text += image_tokens(last_was_image)
+ image_objects.append(item)
+ last_was_image = True
+
+ if add_eos_token:
+ full_text += self.tokenizer.eos_token
+
+ if debug is True:
+ print(f"{full_text=}")
+
+ image_objects = self.image_processor(image_objects, transform=transform)
+
+ all_prompts.append(full_text)
+ all_images.append(image_objects)
+
+ text_encoding = self.tokenizer(
+ text=all_prompts,
+ add_special_tokens=False,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ )
+ all_texts = text_encoding["input_ids"]
+ all_attention_masks = text_encoding["attention_mask"]
+
+ # max_num_images has to be at least 1 even when there are no images
+ max_num_images = max(len(x) for x in all_images)
+ max_num_images = max(1, max_num_images)
+
+ at_least_one_image = sum(len(x) for x in all_images) > 0
+ output_input_ids = []
+ output_images = []
+ output_attention_masks = []
+ for text, attention_mask, images in zip(all_texts, all_attention_masks, all_images):
+ padded_input_ids = text
+
+ image_count = padded_input_ids.count(self.image_token_id)
+ local_max_num_images = min(image_count, max_num_images)
+
+ current_images = images[:local_max_num_images]
+
+ if len(current_images) > 0:
+ padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:])
+ padded_image_tensor[: current_images.size(0)] = current_images
+ else:
+ padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims)
+
+ output_images.append(padded_image_tensor)
+ output_input_ids.append(torch.tensor(padded_input_ids))
+ output_attention_masks.append(torch.tensor(attention_mask))
+
+ output_input_ids = torch.stack(output_input_ids)
+ output_images = torch.stack(output_images)
+ output_attention_masks = torch.stack(output_attention_masks)
+
+ if at_least_one_image:
+ image_attention_mask, _ = image_attention_mask_for_packed_input_ids(output_input_ids, self.tokenizer)
+ image_attention_mask = incremental_to_binary_attention_mask(
+ image_attention_mask, num_classes=max_num_images
+ )
+ else:
+ # in full language mode we set the image mask to all-0s
+ image_attention_mask = torch.zeros(
+ output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool
+ )
+
+ return BatchFeature(
+ data={
+ "input_ids": output_input_ids,
+ "attention_mask": output_attention_masks,
+ "pixel_values": output_images,
+ "image_attention_mask": image_attention_mask,
+ }
+ )
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
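+# A minimal usage sketch (added commentary, not part of the library). It assumes the public
+# "HuggingFaceM4/idefics-9b" checkpoint and reuses the cat image URL from the docstring above;
+# the shapes shown are indicative only.
+#
+#   from transformers import AutoProcessor
+#
+#   processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics-9b")
+#   url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
+#   inputs = processor([["User:", url, "Describe this image.\nAssistant:"]], return_tensors="pt")
+#   # inputs["input_ids"].shape            -> (batch, seq_len)
+#   # inputs["pixel_values"].shape         -> (batch, max_num_images, 3, image_size, image_size)
+#   # inputs["image_attention_mask"].shape -> (batch, seq_len, max_num_images)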
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/vision.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..d90f837b3c77baed36b1e23175939b264c155d0f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/idefics/vision.py
@@ -0,0 +1,490 @@
+# coding=utf-8
+# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object"""
+
+
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
+from ...utils import ModelOutput, logging
+from .configuration_idefics import IdeficsVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+@dataclass
+class IdeficsVisionModelOutput(ModelOutput):
+ """
+ Base class for vision model's outputs that also contains image embeddings obtained by pooling the last hidden states.
+
+ Args:
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
+ The image embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ image_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+# Adapted from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings
+class IdeficsVisionEmbeddings(nn.Module):
+ def __init__(self, config: IdeficsVisionConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
+
+ self.patch_embedding = nn.Conv2d(
+ in_channels=config.num_channels,
+ out_channels=self.embed_dim,
+ kernel_size=self.patch_size,
+ stride=self.patch_size,
+ bias=False,
+ )
+
+ self.num_patches = (self.image_size // self.patch_size) ** 2
+ self.num_positions = self.num_patches + 1
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
+
+ # Heavily inspired from https://github.com/huggingface/transformers/blob/v4.33.0/src/transformers/models/vit/modeling_vit.py#L82
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+ This method allows interpolating the pre-trained position encodings so that the model can be used on
+ higher-resolution images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
+ num_patches = embeddings.shape[1] - 1
+ pos_embed = self.position_embedding(self.position_ids)
+ num_positions = pos_embed.shape[1] - 1
+ if num_patches == num_positions and height == width:
+ return pos_embed
+ class_pos_embed = pos_embed[:, 0]
+ patch_pos_embed = pos_embed[:, 1:]
+
+ embed_dim = embeddings.shape[-1]
+ num_h_patches = height // self.config.patch_size
+ num_w_patches = width // self.config.patch_size
+ # we add a small number to avoid floating point error in the interpolation
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
+ num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1
+ sqrt_num_positions = math.sqrt(num_positions)
+ patch_pos_embed = patch_pos_embed.reshape(1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+ fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16
+ if fp32_upcasting:
+ logger.warning_once(
+ "Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in nn.functional.interpolate "
+ "is not implemented for 'torch.bfloat16' dtype. This will result in a slight overhead."
+ )
+ patch_pos_embed = patch_pos_embed.to(torch.float)
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed,
+ scale_factor=(num_h_patches / sqrt_num_positions, num_w_patches / sqrt_num_positions),
+ mode="bicubic",
+ align_corners=False,
+ )
+ if fp32_upcasting:
+ patch_pos_embed = patch_pos_embed.to(torch.bfloat16)
+ if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
+ raise ValueError(
+ f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
+ f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
+ )
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, embed_dim)
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
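+ # For intuition (added commentary, illustrative numbers): with image_size=224 and patch_size=14
+ # the table holds (224 // 14) ** 2 = 256 patch positions plus one class token. Running on a
+ # 448x448 image produces 32 * 32 = 1024 patches, so the 16x16 grid of patch position embeddings
+ # is bicubically resized to 32x32 above before being added to the patch embeddings.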
+
+ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if not interpolate_pos_encoding:
+ if height != self.image_size or width != self.image_size:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model"
+ f" ({self.image_size}*{self.image_size}). You should try to set `interpolate_pos_encoding=True`"
+ )
+
+ target_dtype = self.patch_embedding.weight.dtype
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
+
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+
+ # add positional encoding to each token
+ if interpolate_pos_encoding:
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ embeddings = embeddings + self.position_embedding(self.position_ids)
+
+ return embeddings
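+ # Shape walk-through (added commentary, using the 224x224 / patch-size-14 defaults as an example):
+ #   pixel_values:                    (batch, 3, 224, 224)
+ #   patch_embeds after conv+flatten: (batch, 256, embed_dim)
+ #   output with the class token:     (batch, 257, embed_dim)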
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->IdeficsVision
+class IdeficsVisionAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = config.attention_dropout
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scale
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ # apply the causal_attention_mask first
+ if causal_attention_mask is not None:
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {causal_attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights has to be reshaped
+ # twice and reused in the following computation
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
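+ # Shape summary for the forward pass above (added commentary):
+ #   hidden_states:                   (bsz, tgt_len, embed_dim)
+ #   q/k/v after projection + _shape: (bsz * num_heads, tgt_len, head_dim)
+ #   attn_weights = q @ k^T:          (bsz * num_heads, tgt_len, tgt_len)   # src_len == tgt_len here
+ #   attn_output after out_proj:      (bsz, tgt_len, embed_dim)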
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->IdeficsVision
+class IdeficsVisionMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->IdeficsVision
+class IdeficsVisionEncoderLayer(nn.Module):
+ def __init__(self, config: IdeficsVisionConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = IdeficsVisionAttention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = IdeficsVisionMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ causal_attention_mask: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->IdeficsVision
+class IdeficsVisionEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+ [`IdeficsVisionEncoderLayer`].
+
+ Args:
+ config: IdeficsVisionConfig
+ """
+
+ def __init__(self, config: IdeficsVisionConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+# Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer
+class IdeficsVisionTransformer(nn.Module):
+ def __init__(self, config: IdeficsVisionConfig):
+ super().__init__()
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = IdeficsVisionEmbeddings(config)
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+ self.encoder = IdeficsVisionEncoder(config)
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ # Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+ hidden_states = self.pre_layrnorm(hidden_states)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ pooled_output = last_hidden_state[:, 0, :]
+ pooled_output = self.post_layernorm(pooled_output)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
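+# A minimal smoke-test sketch (added commentary, not part of the library): it instantiates the
+# module with a default, randomly initialized IdeficsVisionConfig and random pixel values.
+#
+#   import torch
+#   from transformers.models.idefics.configuration_idefics import IdeficsVisionConfig
+#
+#   config = IdeficsVisionConfig()
+#   vision = IdeficsVisionTransformer(config)
+#   pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
+#   out = vision(pixel_values)
+#   # out.last_hidden_state: (1, num_patches + 1, config.hidden_size)
+#   # out.pooler_output:     (1, config.hidden_size)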
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..201db4d272d4b7a45c2f6d7621f0ac0811de2e8e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__init__.py
@@ -0,0 +1,69 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_instructblip": [
+ "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "InstructBlipConfig",
+ "InstructBlipQFormerConfig",
+ "InstructBlipVisionConfig",
+ ],
+ "processing_instructblip": ["InstructBlipProcessor"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_instructblip"] = [
+ "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "InstructBlipQFormerModel",
+ "InstructBlipPreTrainedModel",
+ "InstructBlipForConditionalGeneration",
+ "InstructBlipVisionModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_instructblip import (
+ INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ InstructBlipConfig,
+ InstructBlipQFormerConfig,
+ InstructBlipVisionConfig,
+ )
+ from .processing_instructblip import InstructBlipProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_instructblip import (
+ INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
+ InstructBlipForConditionalGeneration,
+ InstructBlipPreTrainedModel,
+ InstructBlipQFormerModel,
+ InstructBlipVisionModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/convert_instructblip_original_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/convert_instructblip_original_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbc3194113aa18d71b67268eec833a1041948a4b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/convert_instructblip_original_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/modeling_instructblip.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/modeling_instructblip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b843fbc7a5afe079777ef23cdd2cdfbb8a32c719
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/__pycache__/modeling_instructblip.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/configuration_instructblip.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/configuration_instructblip.py
new file mode 100644
index 0000000000000000000000000000000000000000..152389d337f19bdb3168ab2ff9a0a40c350326c2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/configuration_instructblip.py
@@ -0,0 +1,358 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" InstructBLIP model configuration"""
+
+import os
+from typing import Union
+
+from ...configuration_utils import PretrainedConfig
+from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
+from ...utils import logging
+from ..auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class InstructBlipVisionConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`InstructBlipVisionModel`]. It is used to
+ instantiate a InstructBLIP vision encoder according to the specified arguments, defining the model architecture.
+ Instantiating a configuration defaults will yield a similar configuration to that of the InstructBLIP
+ [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 1408):
+ Dimensionality of the encoder layers and the pooler layer.
+ intermediate_size (`int`, *optional*, defaults to 6144):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ num_hidden_layers (`int`, *optional*, defaults to 39):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 14):
+ The size (resolution) of each patch.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. to 1e-5): The epsilon used by the layer
+ normalization layers.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 1e-10):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries and values in the self-attention layers.
+
+ Example:
+
+ ```python
+ >>> from transformers import InstructBlipVisionConfig, InstructBlipVisionModel
+
+ >>> # Initializing a InstructBlipVisionConfig with Salesforce/instruct-blip-flan-t5 style configuration
+ >>> configuration = InstructBlipVisionConfig()
+
+ >>> # Initializing a InstructBlipVisionModel (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
+ >>> model = InstructBlipVisionModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "instructblip_vision_model"
+
+ def __init__(
+ self,
+ hidden_size=1408,
+ intermediate_size=6144,
+ num_hidden_layers=39,
+ num_attention_heads=16,
+ image_size=224,
+ patch_size=14,
+ hidden_act="gelu",
+ layer_norm_eps=1e-6,
+ attention_dropout=0.0,
+ initializer_range=1e-10,
+ qkv_bias=True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.patch_size = patch_size
+ self.image_size = image_size
+ self.initializer_range = initializer_range
+ self.attention_dropout = attention_dropout
+ self.layer_norm_eps = layer_norm_eps
+ self.hidden_act = hidden_act
+ self.qkv_bias = qkv_bias
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the vision config dict if we are loading from InstructBlipConfig
+ if config_dict.get("model_type") == "instructblip":
+ config_dict = config_dict["vision_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
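+ # For example (added commentary; the checkpoint name is only an assumption): loading from a full
+ # InstructBLIP checkpoint such as "Salesforce/instructblip-flan-t5-xl" returns just the nested
+ # vision section:
+ #   vision_config = InstructBlipVisionConfig.from_pretrained("Salesforce/instructblip-flan-t5-xl")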
+
+
+class InstructBlipQFormerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of an [`InstructBlipQFormerModel`]. It is used to
+ instantiate an InstructBLIP Querying Transformer (Q-Former) model according to the specified arguments, defining the
+ model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
+ the InstructBLIP [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5)
+ architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.
+ Read the documentation from [`PretrainedConfig`] for more information.
+
+ Note that [`InstructBlipQFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling the model.
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ cross_attention_frequency (`int`, *optional*, defaults to 2):
+ The frequency of adding cross-attention to the Transformer layers.
+ encoder_hidden_size (`int`, *optional*, defaults to 1408):
+ The hidden size of the hidden states for cross-attention.
+
+ Examples:
+
+ ```python
+ >>> from transformers import InstructBlipQFormerConfig, InstructBlipQFormerModel
+
+ >>> # Initializing a InstructBLIP Salesforce/instruct-blip-flan-t5 style configuration
+ >>> configuration = InstructBlipQFormerConfig()
+
+ >>> # Initializing a model (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
+ >>> model = InstructBlipQFormerModel(configuration)
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "instructblip_qformer"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ position_embedding_type="absolute",
+ cross_attention_frequency=2,
+ encoder_hidden_size=1408,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.cross_attention_frequency = cross_attention_frequency
+ self.encoder_hidden_size = encoder_hidden_size
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the qformer config dict if we are loading from InstructBlipConfig
+ if config_dict.get("model_type") == "instructblip":
+ config_dict = config_dict["qformer_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class InstructBlipConfig(PretrainedConfig):
+ r"""
+ [`InstructBlipConfig`] is the configuration class to store the configuration of an
+ [`InstructBlipForConditionalGeneration`]. It is used to instantiate an InstructBLIP model according to the specified
+ arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
+ the defaults will yield a similar configuration to that of the InstructBLIP
+ [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vision_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`InstructBlipVisionConfig`].
+ qformer_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`InstructBlipQFormerConfig`].
+ text_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize any [`PretrainedConfig`].
+ num_query_tokens (`int`, *optional*, defaults to 32):
+ The number of query tokens passed through the Transformer.
+
+ kwargs (*optional*):
+ Dictionary of keyword arguments.
+
+ Example:
+
+ ```python
+ >>> from transformers import (
+ ... InstructBlipVisionConfig,
+ ... InstructBlipQFormerConfig,
+ ... OPTConfig,
+ ... InstructBlipConfig,
+ ... InstructBlipForConditionalGeneration,
+ ... )
+
+ >>> # Initializing a InstructBlipConfig with Salesforce/instruct-blip-flan-t5 style configuration
+ >>> configuration = InstructBlipConfig()
+
+ >>> # Initializing a InstructBlipForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
+ >>> model = InstructBlipForConditionalGeneration(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+
+ >>> # We can also initialize a InstructBlipConfig from a InstructBlipVisionConfig, InstructBlipQFormerConfig and any PretrainedConfig
+
+ >>> # Initializing InstructBLIP vision, InstructBLIP Q-Former and language model configurations
+ >>> vision_config = InstructBlipVisionConfig()
+ >>> qformer_config = InstructBlipQFormerConfig()
+ >>> text_config = OPTConfig()
+
+ >>> config = InstructBlipConfig.from_text_vision_configs(vision_config, qformer_config, text_config)
+ ```"""
+
+ model_type = "instructblip"
+
+ def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
+ super().__init__(**kwargs)
+
+ if vision_config is None:
+ vision_config = {}
+ logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
+
+ if qformer_config is None:
+ qformer_config = {}
+ logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
+
+ if text_config is None:
+ text_config = {}
+ logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
+
+ self.vision_config = InstructBlipVisionConfig(**vision_config)
+ self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
+ text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
+ self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
+
+ self.tie_word_embeddings = self.text_config.tie_word_embeddings
+ self.is_encoder_decoder = self.text_config.is_encoder_decoder
+
+ self.num_query_tokens = num_query_tokens
+ self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
+ self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
+ self.initializer_factor = 1.0
+ self.initializer_range = 0.02
+
+ @classmethod
+ def from_vision_qformer_text_configs(
+ cls,
+ vision_config: InstructBlipVisionConfig,
+ qformer_config: InstructBlipQFormerConfig,
+ text_config: PretrainedConfig,
+ **kwargs,
+ ):
+ r"""
+ Instantiate a [`InstructBlipConfig`] (or a derived class) from a InstructBLIP vision model, Q-Former and
+ language model configurations.
+
+ Returns:
+ [`InstructBlipConfig`]: An instance of a configuration object
+ """
+
+ return cls(
+ vision_config=vision_config.to_dict(),
+ qformer_config=qformer_config.to_dict(),
+ text_config=text_config.to_dict(),
+ **kwargs,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8b9c86cfddcd6e973b63822d8d91908723a59b9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py
@@ -0,0 +1,303 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Convert InstructBLIP checkpoints from the original repository.
+
+URL: https://github.com/salesforce/LAVIS/tree/main/projects/instructblip
+"""
+
+import argparse
+
+import requests
+import torch
+
+# pip3 install salesforce-lavis
+# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
+# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
+# same for Vicuna-13b
+from lavis.models import load_model_and_preprocess
+from PIL import Image
+
+from transformers import (
+ AutoTokenizer,
+ BlipImageProcessor,
+ InstructBlipConfig,
+ InstructBlipForConditionalGeneration,
+ InstructBlipProcessor,
+ InstructBlipQFormerConfig,
+ InstructBlipVisionConfig,
+ LlamaConfig,
+ LlamaTokenizerFast,
+ T5Config,
+ T5TokenizerFast,
+)
+from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
+
+
+def load_demo_image():
+ url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
+ image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+
+ return image
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config):
+ rename_keys = []
+ # fmt: off
+
+ # vision encoder
+ rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
+ rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
+ rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
+ rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
+ rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
+ rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))
+
+ for i in range(config.vision_config.num_hidden_layers):
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
+ rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
+ rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
+ rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))
+
+ # QFormer
+ rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
+ rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))
+
+ # fmt: on
+ return rename_keys
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+def read_in_q_v_bias(state_dict, config):
+ for i in range(config.vision_config.num_hidden_layers):
+ # read in original q and v biases
+ q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
+ v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
+
+ # next, set bias in the state dict
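+        # there is no separate key bias in the original checkpoint, so the k slice of the concatenated qkv bias is filled with zeros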
+ qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
+ state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
+
+
+def get_blip2_config(model_name):
+ image_size = 364 if "coco" in model_name else 224
+ vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
+
+    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
+    # note: the flan-T5 configs don't seem to have bos_token_id set properly, so we set it explicitly below
+ if "t5-xl" in model_name:
+ text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
+ elif "t5-xxl" in model_name:
+ text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
+ elif "vicuna-7b" in model_name:
+ text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
+ elif "vicuna-13b" in model_name:
+ text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
+ else:
+ raise ValueError("Model name not supported")
+
+ # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
+ qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
+ config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
+
+ return config, image_size
+
+
+@torch.no_grad()
+def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
+ """
+    Copy/paste/tweak the original model's weights to the Transformers design.
+ """
+ qformer_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased", truncation_side="left")
+ qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
+
+ if "t5" in model_name:
+ tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
+ elif "vicuna" in model_name:
+ # the following was used in the original implementation:
+ # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
+ # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+ # tokenizer.add_special_tokens({"bos_token": ""})
+ # tokenizer.add_special_tokens({"eos_token": ""})
+ # tokenizer.add_special_tokens({"unk_token": ""})
+ tokenizer = LlamaTokenizerFast.from_pretrained(
+ "huggyllama/llama-7b", truncation_side="left", bos_token="", unk_token=""
+ )
+ tokenizer.add_special_tokens({"pad_token": "[PAD]"})
+
+ config, image_size = get_blip2_config(model_name)
+ hf_model = InstructBlipForConditionalGeneration(config).eval()
+
+ model_name_to_original = {
+ "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
+ "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
+ "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
+ "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
+ }
+
+    name, model_type = model_name_to_original[model_name]
+
+ # load original model
+ print("Loading original model...")
+ hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
+ lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
+ original_model, vis_processors, _ = load_model_and_preprocess(
+        name=name, model_type=model_type, is_eval=True, device=lavis_device
+ )
+ original_model.eval()
+ print("Done!")
+
+ # update state dict keys
+ state_dict = original_model.state_dict()
+ rename_keys = create_rename_keys(config)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+
+    # the remaining keys can be renamed with simple string replacements
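+    # e.g. a key like "Qformer.bert.encoder.layer.0.attention.self.query.weight" (illustrative) becomes
+    # "qformer.encoder.layer.0.attention.attention.query.weight" after the replacements below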
+ for key, val in state_dict.copy().items():
+ val = state_dict.pop(key)
+ if key.startswith("Qformer.bert"):
+ key = key.replace("Qformer.bert", "qformer")
+ if "attention.self" in key:
+ key = key.replace("self", "attention")
+ if "llm_proj" in key:
+ key = key.replace("llm_proj", "language_projection")
+ if "t5_proj" in key:
+ key = key.replace("t5_proj", "language_projection")
+ if key.startswith("llm_model"):
+ key = key.replace("llm_model", "language_model")
+ if key.startswith("t5"):
+ key = key.replace("t5", "language")
+ state_dict[key] = val
+
+ # read in qv biases
+ read_in_q_v_bias(state_dict, config)
+
+ # note: weights get loaded in torch.float32 by default
+ hf_model.load_state_dict(state_dict, strict=True)
+
+ image = load_demo_image()
+ prompt = "What is unusual about this image?"
+
+ # create processor
+ image_processor = BlipImageProcessor(
+ size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
+ )
+ processor = InstructBlipProcessor(
+ image_processor=image_processor,
+ tokenizer=tokenizer,
+ qformer_tokenizer=qformer_tokenizer,
+ )
+ inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)
+
+ # make sure processor creates exact same pixel values
+ original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
+ pixel_values = inputs.pixel_values
+ assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
+
+ original_model.to(lavis_device)
+ hf_model.to(hf_model_device)
+ with torch.no_grad():
+ if "vicuna" in model_name:
+ original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
+ logits = hf_model(**inputs).logits
+ else:
+ original_logits = original_model(
+ {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
+ ).logits
+ label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
+ labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
+ logits = hf_model(**inputs, labels=labels).logits
+
+ print("First values of original logits:", original_logits[0, :3, :3])
+ print("First values of HF logits:", logits[0, :3, :3])
+
+ # assert values
+ assert original_logits.shape == logits.shape
+ atol = 1e-4 if "vicuna" in model_name else 1e-5
+ assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
+ print("Looks ok!")
+
+ print("Generating with original model...")
+ original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)
+
+    # note: the HF model weights are in torch.float32 here; cast them to a lower-precision dtype first if desired
+ print("Generating with HF model...")
+ outputs = hf_model.generate(
+ **inputs,
+ do_sample=False,
+ num_beams=5,
+ max_length=256,
+ min_length=1,
+ top_p=0.9,
+ repetition_penalty=1.5,
+ length_penalty=1.0,
+ temperature=1,
+ )
+ if "vicuna" in model_name:
+ # convert output id 0 to 2 (eos_token_id)
+ # TODO add this in the generate method?
+ outputs[outputs == 0] = 2
+ print("Original generation:", original_outputs)
+ output_text = processor.batch_decode(outputs, skip_special_tokens=True)
+ output_text = [text.strip() for text in output_text]
+ print("HF generation:", output_text)
+
+ if pytorch_dump_folder_path is not None:
+ processor.save_pretrained(pytorch_dump_folder_path)
+ hf_model.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ processor.push_to_hub(f"Salesforce/{model_name}")
+ hf_model.push_to_hub(f"Salesforce/{model_name}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ choices = [
+ "instructblip-vicuna-7b",
+ "instructblip-vicuna-13b",
+ "instructblip-flan-t5-xl",
+ "instructblip-flan-t5-xxl",
+ ]
+ parser.add_argument(
+ "--model_name",
+ default="instructblip-flan-t5-xl",
+ choices=choices,
+ type=str,
+ help="Path to hf config.json of model to convert",
+ )
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ parser.add_argument(
+ "--push_to_hub",
+ action="store_true",
+ help="Whether to push the model and processor to the hub after converting",
+ )
+
+ args = parser.parse_args()
+
+ convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/modeling_instructblip.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/modeling_instructblip.py
new file mode 100644
index 0000000000000000000000000000000000000000..b18d46723179e2cbcdb5dbadbb3f11806ce89f80
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/modeling_instructblip.py
@@ -0,0 +1,1567 @@
+# coding=utf-8
+# Copyright 2023 The Salesforce Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch InstructBLIP model."""
+
+import math
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPooling,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM
+from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "Salesforce/instructblip-flan-t5-xl"
+
+
+from ..deprecated._archive_maps import INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+# Copied from transformers.models.blip_2.modeling_blip_2.Blip2ForConditionalGenerationModelOutput with Blip2->InstructBlip
+class InstructBlipForConditionalGenerationModelOutput(ModelOutput):
+ """
+ Class defining the outputs of [`InstructBlipForConditionalGeneration`].
+
+ Args:
+        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss from the language model.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head of the language model.
+ vision_outputs (`BaseModelOutputWithPooling`):
+ Outputs of the vision encoder.
+ qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
+ Outputs of the Q-Former (Querying Transformer).
+ language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
+ Outputs of the language model.
+ """
+
+ loss: Optional[Tuple[torch.FloatTensor]] = None
+ logits: Optional[Tuple[torch.FloatTensor]] = None
+ vision_outputs: Optional[torch.FloatTensor] = None
+ qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
+ language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
+
+ def to_tuple(self) -> Tuple[Any]:
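+        # the nested vision, Q-Former and language model outputs are converted to tuples themselves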
+ return tuple(
+ self[k]
+ if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"]
+ else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->InstructBlip
+class InstructBlipVisionEmbeddings(nn.Module):
+ def __init__(self, config: InstructBlipVisionConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
+
+ self.patch_embedding = nn.Conv2d(
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
+ )
+
+ self.num_patches = (self.image_size // self.patch_size) ** 2
+ self.num_positions = self.num_patches + 1
+
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
+
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
+ batch_size = pixel_values.shape[0]
+ target_dtype = self.patch_embedding.weight.dtype
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+ embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
+ return embeddings
+
+
+# Copied from transformers.models.blip_2.modeling_blip_2.Blip2Attention with Blip2->InstructBlip
+class InstructBlipAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = nn.Dropout(config.attention_dropout)
+
+ # small tweak here compared to CLIP, no bias here
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
+
+ if config.qkv_bias:
+ q_bias = nn.Parameter(torch.zeros(self.embed_dim))
+ v_bias = nn.Parameter(torch.zeros(self.embed_dim))
+ else:
+ q_bias = None
+ v_bias = None
+
+ if q_bias is not None:
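+            # the key slice of the qkv bias is initialized to zeros, since the original model only defines q and v biases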
+ qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
+ self.qkv.bias = nn.Parameter(qkv_bias)
+
+ self.projection = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+
+ mixed_qkv = self.qkv(hidden_states)
+
+ mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
+ 2, 0, 3, 1, 4
+ )
+ query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
+
+ attention_scores = attention_scores * self.scale
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
+
+ new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
+ context_layer = context_layer.reshape(new_context_layer_shape)
+
+ output = self.projection(context_layer)
+
+ outputs = (output, attention_probs) if output_attentions else (output, None)
+
+ return outputs
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipMLP
+class InstructBlipMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->InstructBlip
+class InstructBlipEncoderLayer(nn.Module):
+ def __init__(self, config: InstructBlipConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = InstructBlipAttention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = InstructBlipMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ head_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = hidden_states + residual
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+
+ hidden_states = hidden_states + residual
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class InstructBlipPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = InstructBlipConfig
+ base_model_prefix = "blip"
+ supports_gradient_checkpointing = True
+ _no_split_modules = [
+ "InstructBlipQFormerEmbeddings",
+ "InstructBlipAttention",
+ "InstructBlipQFormerMultiHeadAttention",
+ "InstructBlipQFormerSelfOutput",
+ ]
+ _keep_in_fp32_modules = []
+
+ # Copied from transformers.models.blip_2.modeling_blip_2.Blip2PreTrainedModel._init_weights with Blip2->InstructBlip
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_range
+ if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=factor)
+ if hasattr(module, "bias") and module.bias is not None:
+ module.bias.data.zero_()
+
+ if isinstance(module, InstructBlipVisionEmbeddings):
+ if hasattr(self.config, "vision_config"):
+ factor = self.config.vision_config.initializer_range
+ nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
+ nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
+
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Linear) and module.bias is not None:
+ module.bias.data.zero_()
+
+
+INSTRUCTBLIP_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`InstructBlipConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+INSTRUCTBLIP_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`InstructBlipProcessor`]. See
+ [`InstructBlipProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+INSTRUCTBLIP_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`InstructBlipProcessor`]. See
+ [`InstructBlipProcessor.__call__`] for details.
+
+ qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided
+ to serve as text prompt, which the Q-Former model will encode.
+
+ Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
+ details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
+ provided to serve as text prompt, which the language model can continue.
+
+ Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
+ details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an
+ encoder-decoder language model (like T5) is used.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ Only relevant in case an encoder-decoder language model (like T5) is used.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->InstructBlip
+class InstructBlipEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+ [`InstructBlipEncoderLayer`].
+
+ Args:
+ config (`InstructBlipConfig`):
+ The corresponding vision configuration for the `InstructBlipEncoder`.
+ """
+
+ def __init__(self, config: InstructBlipConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Embedded representation of the inputs. Should be float, not int tokens.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+# Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->InstructBlip, BLIP->INSTRUCTBLIP
+class InstructBlipVisionModel(InstructBlipPreTrainedModel):
+ main_input_name = "pixel_values"
+ config_class = InstructBlipVisionConfig
+
+ def __init__(self, config: InstructBlipVisionConfig):
+ super().__init__(config)
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = InstructBlipVisionEmbeddings(config)
+ self.encoder = InstructBlipEncoder(config)
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(INSTRUCTBLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=InstructBlipVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ last_hidden_state = self.post_layernorm(last_hidden_state)
+
+ pooled_output = last_hidden_state[:, 0, :]
+ pooled_output = self.post_layernorm(pooled_output)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+
+class InstructBlipQFormerMultiHeadAttention(nn.Module):
+ def __init__(self, config, is_cross_attention=False):
+ super().__init__()
+ self.config = config
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
+ % (config.hidden_size, config.num_attention_heads)
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ if is_cross_attention:
+ self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
+ else:
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+ self.save_attention = False
+
+ def save_attn_gradients(self, attn_gradients):
+ self.attn_gradients = attn_gradients
+
+ def get_attn_gradients(self):
+ return self.attn_gradients
+
+ def save_attention_map(self, attention_map):
+ self.attention_map = attention_map
+
+ def get_attention_map(self):
+ return self.attention_map
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ ):
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ mixed_query_layer = self.query(hidden_states)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
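+        # cache the key/value states; they are returned as the last element of `outputs` so callers can reuse them for decoding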
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ seq_length = hidden_states.size()[1]
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ attention_scores_dtype = attention_scores.dtype
+
+ if attention_mask is not None:
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype)
+
+ if is_cross_attention and self.save_attention:
+ self.save_attention_map(attention_probs)
+ attention_probs.register_hook(self.save_attn_gradients)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs_dropped = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs_dropped = attention_probs_dropped * head_mask
+
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->InstructBlipQFormer
+class InstructBlipQFormerSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerAttention with Blip2->InstructBlip
+class InstructBlipQFormerAttention(nn.Module):
+ def __init__(self, config, is_cross_attention=False):
+ super().__init__()
+ self.attention = InstructBlipQFormerMultiHeadAttention(config, is_cross_attention)
+ self.output = InstructBlipQFormerSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->InstructBlipQFormer
+class InstructBlipQFormerIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->InstructBlipQFormer
+class InstructBlipQFormerOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class InstructBlipQFormerLayer(nn.Module):
+ def __init__(self, config, layer_idx):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = InstructBlipQFormerAttention(config)
+
+ self.layer_idx = layer_idx
+
+ if layer_idx % config.cross_attention_frequency == 0:
+ self.crossattention = InstructBlipQFormerAttention(config, is_cross_attention=True)
+ self.has_cross_attention = True
+ else:
+ self.has_cross_attention = False
+
+ self.intermediate = InstructBlipQFormerIntermediate(config)
+ self.output = InstructBlipQFormerOutput(config)
+
+ self.intermediate_query = InstructBlipQFormerIntermediate(config)
+ self.output_query = InstructBlipQFormerOutput(config)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ query_length=0,
+ ):
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:-1]
+
+ present_key_value = self_attention_outputs[-1]
+
+ if query_length > 0:
+ query_attention_output = attention_output[:, :query_length, :]
+
+ if self.has_cross_attention:
+ if encoder_hidden_states is None:
+ raise ValueError("encoder_hidden_states must be given for cross-attention layers")
+ cross_attention_outputs = self.crossattention(
+ query_attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ output_attentions=output_attentions,
+ )
+ query_attention_output = cross_attention_outputs[0]
+ # add cross attentions if we output attention weights
+ outputs = outputs + cross_attention_outputs[1:-1]
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk_query,
+ self.chunk_size_feed_forward,
+ self.seq_len_dim,
+ query_attention_output,
+ )
+
+ if attention_output.shape[1] > query_length:
+ layer_output_text = apply_chunking_to_forward(
+ self.feed_forward_chunk,
+ self.chunk_size_feed_forward,
+ self.seq_len_dim,
+ attention_output[:, query_length:, :],
+ )
+ layer_output = torch.cat([layer_output, layer_output_text], dim=1)
+ else:
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk,
+ self.chunk_size_feed_forward,
+ self.seq_len_dim,
+ attention_output,
+ )
+ outputs = (layer_output,) + outputs
+
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+ def feed_forward_chunk_query(self, attention_output):
+ intermediate_output = self.intermediate_query(attention_output)
+ layer_output = self.output_query(intermediate_output, attention_output)
+ return layer_output
+
+
+# Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerEncoder with Blip2->InstructBlip
+class InstructBlipQFormerEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList(
+ [InstructBlipQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ query_length=0,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions else None
+
+ next_decoder_cache = () if use_cache else None
+
+ for i in range(self.config.num_hidden_layers):
+ layer_module = self.layer[i]
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
+ if use_cache:
+ logger.warning(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ query_length,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if layer_module.has_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class InstructBlipQFormerEmbeddings(nn.Module):
+ """Construct the embeddings from word and position embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+
+ self.config = config
+
+ def forward(
+ self,
+ input_ids=None,
+ position_ids=None,
+ query_embeds=None,
+ past_key_values_length=0,
+ ):
+ if input_ids is not None:
+ seq_length = input_ids.size()[1]
+ else:
+ seq_length = 0
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone()
+
+ if input_ids is not None:
+ embeddings = self.word_embeddings(input_ids)
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids.to(embeddings.device))
+ embeddings = embeddings + position_embeddings
+
+ if query_embeds is not None:
+ embeddings = torch.cat((query_embeds, embeddings), dim=1)
+ else:
+ embeddings = query_embeds
+
+ embeddings = embeddings.to(self.layernorm.weight.dtype)
+ embeddings = self.layernorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class InstructBlipQFormerModel(InstructBlipPreTrainedModel):
+ """
+ Querying Transformer (Q-Former), used in InstructBLIP. Slightly modified from BLIP-2 as it also takes the
+ instruction as input.
+ """
+
+ def __init__(self, config: InstructBlipQFormerConfig):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = InstructBlipQFormerEmbeddings(config)
+
+ self.encoder = InstructBlipQFormerEncoder(config)
+
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+        base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ def get_extended_attention_mask(
+ self,
+ attention_mask: torch.Tensor,
+ input_shape: Tuple[int],
+ device: torch.device,
+ has_query: bool = False,
+ ) -> torch.Tensor:
+ """
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
+
+ Arguments:
+ attention_mask (`torch.Tensor`):
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
+ input_shape (`Tuple[int]`):
+ The shape of the input to the model.
+ device: (`torch.device`):
+ The device of the input to the model.
+
+ Returns:
+            `torch.Tensor`: The extended attention mask, with the same dtype as `attention_mask.dtype`.
+ """
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ if attention_mask.dim() == 3:
+ extended_attention_mask = attention_mask[:, None, :, :]
+ elif attention_mask.dim() == 2:
+ # Provided a padding mask of dimensions [batch_size, seq_length]
+ # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ extended_attention_mask = attention_mask[:, None, None, :]
+ else:
+ raise ValueError(
+ f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})",
+ )
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
+ return extended_attention_mask
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ query_embeds: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers`, with each tuple having 4 tensors
+            of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None and query_embeds is None:
+ raise ValueError("You have to specify query_embeds when input_ids is None")
+
+ # past_key_values_length
+ past_key_values_length = (
+ past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
+ )
+
+ query_length = query_embeds.shape[1] if query_embeds is not None else 0
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ query_embeds=query_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+
+ input_shape = embedding_output.size()[:-1]
+ batch_size, seq_length = input_shape
+ device = embedding_output.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if encoder_hidden_states is not None:
+ if isinstance(encoder_hidden_states, list):
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
+ else:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+
+ if isinstance(encoder_attention_mask, list):
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
+ elif encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ query_length=query_length,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = sequence_output[:, 0, :]
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ InstructBLIP Model for generating text given an image and an optional text prompt. The model consists of a vision
+ encoder, Querying Transformer (Q-Former) and a language model.
+
+ One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue
+ the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.
+ """,
+ INSTRUCTBLIP_START_DOCSTRING,
+)
+class InstructBlipForConditionalGeneration(InstructBlipPreTrainedModel):
+ config_class = InstructBlipConfig
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: InstructBlipConfig):
+ super().__init__(config)
+
+ self.vision_model = InstructBlipVisionModel(config.vision_config)
+
+ self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
+ self.qformer = InstructBlipQFormerModel(config.qformer_config)
+
+ self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
+
+ if config.use_decoder_only_language_model:
+ language_model = AutoModelForCausalLM.from_config(config.text_config)
+ else:
+ language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
+
+ if language_model._no_split_modules is not None:
+ self._no_split_modules.extend(language_model._no_split_modules)
+
+ if language_model._keep_in_fp32_modules is not None:
+ self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules)
+
+ self.language_model = language_model
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.language_model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.language_model.set_input_embeddings(value)
+
+ def set_output_embeddings(self, new_embeddings):
+ self.language_model.set_output_embeddings(new_embeddings)
+
+ def get_output_embeddings(self) -> nn.Module:
+ return self.language_model.get_output_embeddings()
+
+ def get_encoder(self):
+ return self.language_model.get_encoder()
+
+ def get_decoder(self):
+ return self.language_model.get_decoder()
+
+ def _tie_weights(self):
+ if not self.config.use_decoder_only_language_model:
+ self.language_model.encoder.embed_tokens = self.language_model.shared
+ self.language_model.decoder.embed_tokens = self.language_model.shared
+
+ def _preprocess_accelerate(self):
+ r"""
+ Some pre-processing hacks to make the model `accelerate` compatible. Check
+ https://github.com/huggingface/transformers/pull/21707 for more details.
+ """
+ hf_device_map = self.hf_device_map
+
+ if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1:
+ # warn users about unexpected behavior when using multi-GPU + InstructBLIP + `accelerate`.
+ logger.warning(
+ "The `language_model` is not in the `hf_device_map` dictionary and you are running your script"
+ " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`."
+ " Please pass a `device_map` that contains `language_model` to remove this warning."
+ " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for"
+ " more details on creating a `device_map` for large models.",
+ )
+
+ if hasattr(self.language_model, "_hf_hook"):
+ self.language_model._hf_hook.io_same_device = True # For `generate` compatibility
+
+ @add_start_docstrings_to_model_forward(INSTRUCTBLIP_INPUTS_DOCSTRING)
+ @replace_return_docstrings(
+ output_type=InstructBlipForConditionalGenerationModelOutput, config_class=InstructBlipVisionConfig
+ )
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ qformer_input_ids: torch.LongTensor,
+ qformer_attention_mask: Optional[torch.LongTensor] = None,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, InstructBlipForConditionalGenerationModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size -
+ 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
+ config.vocab_size]`
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b")
+ >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
+
+ >>> device = "cuda" if torch.cuda.is_available() else "cpu"
+ >>> model.to(device) # doctest: +IGNORE_RESULT
+
+ >>> url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+ >>> prompt = "What is unusual about this image?"
+ >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
+
+ >>> outputs = model.generate(
+ ... **inputs,
+ ... do_sample=False,
+ ... num_beams=5,
+ ... max_length=256,
+ ... min_length=1,
+ ... top_p=0.9,
+ ... repetition_penalty=1.5,
+ ... length_penalty=1.0,
+ ... temperature=1,
+ ... )
+ >>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
+ >>> print(generated_text)
+ The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV, which is parked in the middle of a busy city street. This is an unconventional approach to ironing clothes, as it requires the man to balance himself and his ironing equipment on top of the vehicle while navigating through traffic. Additionally, the presence of taxis and other vehicles in the scene further emphasizes the unusual nature of this situation.
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # step 1: forward the images through the vision encoder,
+ # to get image embeddings of shape (batch_size, seq_len, hidden_size)
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ image_embeds = vision_outputs[0]
+
+ # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
+
+ # difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+ query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
+ if qformer_attention_mask is None:
+ qformer_attention_mask = torch.ones_like(qformer_input_ids)
+ qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
+ query_outputs = self.qformer(
+ input_ids=qformer_input_ids,
+ attention_mask=qformer_attention_mask,
+ query_embeds=query_tokens,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
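+ # the Q-Former processes query tokens and instruction tokens together; only the hidden states of the
+ # query token positions (the first `num_query_tokens` entries) are kept and passed to the language model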
+ query_output = query_outputs[0][:, : query_tokens.size(1), :]
+
+ # step 3: use the language model, conditioned on the query outputs and the prompt
+ language_model_inputs = self.language_projection(query_output)
+ language_model_attention_mask = torch.ones(
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
+ )
+
+ inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
+
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
+
+ if attention_mask is None:
+ attention_mask = torch.ones_like(input_ids)
+ attention_mask = torch.cat([language_model_attention_mask.to(attention_mask.device), attention_mask], dim=1)
+
+ if self.config.use_decoder_only_language_model:
+ outputs = self.language_model(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ logits = outputs.logits if return_dict else outputs[0]
+ loss = None
+ # we compute the loss here since we need to take into account the sequence length of the query embeds
+ if labels is not None:
+ labels = labels.to(logits.device)
+ logits = logits[:, -labels.size(1) :, :]
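+ # the logits also cover the query embedding positions prepended to the prompt; keep only the last
+ # `labels.size(1)` positions so that logits and text labels line up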
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous().to(logits.device)
+
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss(reduction="mean")
+
+ loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1))
+ else:
+ outputs = self.language_model(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ labels=labels,
+ )
+ loss = outputs.loss if return_dict else outputs[0]
+ logits = outputs.logits if return_dict else outputs[1]
+
+ if not return_dict:
+ output = (logits, vision_outputs, query_outputs, outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return InstructBlipForConditionalGenerationModelOutput(
+ loss=loss,
+ logits=logits,
+ vision_outputs=vision_outputs,
+ qformer_outputs=query_outputs,
+ language_model_outputs=outputs,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ pixel_values: torch.FloatTensor,
+ qformer_input_ids: Optional[torch.LongTensor] = None,
+ qformer_attention_mask: Optional[torch.LongTensor] = None,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ **generate_kwargs,
+ ) -> torch.LongTensor:
+ """
+ Overrides `generate` function to be able to use the model as a conditional generator.
+
+ Args:
+ pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
+ Input images to be processed.
+ qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+ The sequence used as a prompt to be fed to the Q-Former module.
+ qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+ Mask to avoid performing attention on padding token indices.
+ input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+ The sequence used as a prompt for the generation.
+ attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
+ Mask to avoid performing attention on padding token indices.
+
+ Returns:
+ captions (list): A list of strings of length batch_size * num_captions.
+ """
+ if hasattr(self, "hf_device_map"):
+ # preprocess for `accelerate`
+ self._preprocess_accelerate()
+
+ batch_size = pixel_values.shape[0]
+ image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state
+
+ image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
+
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
+ query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
+ if qformer_attention_mask is None:
+ qformer_attention_mask = torch.ones_like(qformer_input_ids)
+ qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
+ query_outputs = self.qformer(
+ input_ids=qformer_input_ids,
+ attention_mask=qformer_attention_mask,
+ query_embeds=query_tokens,
+ encoder_hidden_states=image_embeds,
+ encoder_attention_mask=image_attention_mask,
+ return_dict=True,
+ )
+ query_output = query_outputs.last_hidden_state[:, : query_tokens.size(1), :]
+
+ language_model_inputs = self.language_projection(query_output)
+ language_attention_mask = torch.ones(
+ language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
+ )
+
+ if input_ids is None:
+ input_ids = (
+ torch.LongTensor([[self.config.text_config.bos_token_id]])
+ .repeat(batch_size, 1)
+ .to(image_embeds.device)
+ )
+ if attention_mask is None:
+ attention_mask = torch.ones_like(input_ids)
+ attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1)
+
+ # concatenate query embeddings with prompt embeddings
+ inputs_embeds = self.get_input_embeddings()(input_ids)
+ inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
+
+ # add the query embedding length to max_length, so that the final max_length is counted only on text token embeds
+ # -1 is to account for the BOS token prepended after `generate`.
+ if not self.language_model.config.is_encoder_decoder:
+ generate_kwargs["max_length"] = generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1
+ generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1]
+
+ outputs = self.language_model.generate(
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ **generate_kwargs,
+ )
+
+ # this is a temporary workaround to be consistent with other generation models and
+ # have BOS as the first token, even though under the hood we are calling LM with embeds
+ if not self.language_model.config.is_encoder_decoder:
+ # the InstructBLIP authors used inconsistent tokenizer/model files during training,
+ # with the tokenizer's bos token being set to `</s>`, which has ID=2,
+ # whereas the model's text config has bos token id = 0
+ bos_token_id = (
+ 2
+ if self.config.text_config.architectures[0] == "LLaMAForCausalLM"
+ else self.config.text_config.bos_token_id
+ )
+ bos_tokens = torch.LongTensor([[bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device)
+ if not isinstance(outputs, torch.Tensor):
+ outputs.sequences = torch.cat([bos_tokens, outputs.sequences], dim=-1)
+ else:
+ outputs = torch.cat([bos_tokens, outputs], dim=-1)
+
+ return outputs
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/processing_instructblip.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/processing_instructblip.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d266d8b98e34a37088a158dfa60e9692b70e2b5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/instructblip/processing_instructblip.py
@@ -0,0 +1,173 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for InstructBLIP. Largely a copy of Blip2Processor with the addition of a tokenizer for the Q-Former.
+"""
+
+import os
+from typing import List, Optional, Union
+
+from ...image_processing_utils import BatchFeature
+from ...image_utils import ImageInput
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from ...utils import TensorType
+from ..auto import AutoTokenizer
+
+
+class InstructBlipProcessor(ProcessorMixin):
+ r"""
+ Constructs an InstructBLIP processor which wraps a BLIP image processor and a LLaMa/T5 tokenizer into a single
+ processor.
+
+ [`InstructBlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the
+ docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
+
+ Args:
+ image_processor (`BlipImageProcessor`):
+ An instance of [`BlipImageProcessor`]. The image processor is a required input.
+ tokenizer (`AutoTokenizer`):
+ An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
+ qformer_tokenizer (`AutoTokenizer`):
+ An instance of [`PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
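+
+ Example (a minimal usage sketch, assuming the `Salesforce/instructblip-vicuna-7b` checkpoint is available):
+
+ ```python
+ >>> from transformers import InstructBlipProcessor
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
+ >>> url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+ >>> inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
+ >>> sorted(inputs.keys())
+ ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']
+ ```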
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "BlipImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, image_processor, tokenizer, qformer_tokenizer):
+ super().__init__(image_processor, tokenizer)
+
+ # add QFormer tokenizer
+ self.qformer_tokenizer = qformer_tokenizer
+
+ def __call__(
+ self,
+ images: ImageInput = None,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_token_type_ids: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
+
+ Please refer to the docstring of the above two methods for more information.
+ """
+ if images is None and text is None:
+ raise ValueError("You have to specify at least images or text.")
+
+ encoding = BatchFeature()
+
+ if text is not None:
+ text_encoding = self.tokenizer(
+ text=text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_token_type_ids=return_token_type_ids,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+ encoding.update(text_encoding)
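+ # the same text is tokenized a second time with the Q-Former tokenizer; its ids/mask are stored under
+ # separate `qformer_*` keys so the model can feed the instruction to both the Q-Former and the language model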
+ qformer_text_encoding = self.qformer_tokenizer(
+ text=text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_token_type_ids=return_token_type_ids,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+ encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
+ encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
+
+ if images is not None:
+ image_encoding = self.image_processor(images, return_tensors=return_tensors)
+ encoding.update(image_encoding)
+
+ return encoding
+
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+ # overwrite to save the Q-Former tokenizer in a separate folder
+ def save_pretrained(self, save_directory, **kwargs):
+ if os.path.isfile(save_directory):
+ raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
+ os.makedirs(save_directory, exist_ok=True)
+ qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
+ self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
+ return super().save_pretrained(save_directory, **kwargs)
+
+ # overwrite to load the Q-Former tokenizer from a separate folder
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
+ args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
+ args.append(qformer_tokenizer)
+ return cls(*args)
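+
+ # Round-trip sketch (hypothetical local path): `save_pretrained` writes the Q-Former tokenizer to the
+ # `qformer_tokenizer/` subfolder, and `from_pretrained` reloads it from there, e.g.:
+ #     processor.save_pretrained("./instructblip-processor")
+ #     reloaded = InstructBlipProcessor.from_pretrained("./instructblip-processor")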
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f7e775431dd0a250dbbb5ca422f1a81be919225
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py
@@ -0,0 +1,117 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
+ "tokenization_lxmert": ["LxmertTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_lxmert"] = [
+ "LxmertEncoder",
+ "LxmertForPreTraining",
+ "LxmertForQuestionAnswering",
+ "LxmertModel",
+ "LxmertPreTrainedModel",
+ "LxmertVisualFeatureEncoder",
+ "LxmertXLayer",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_lxmert"] = [
+ "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFLxmertForPreTraining",
+ "TFLxmertMainLayer",
+ "TFLxmertModel",
+ "TFLxmertPreTrainedModel",
+ "TFLxmertVisualFeatureEncoder",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
+ from .tokenization_lxmert import LxmertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_lxmert_fast import LxmertTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_lxmert import (
+ LxmertEncoder,
+ LxmertForPreTraining,
+ LxmertForQuestionAnswering,
+ LxmertModel,
+ LxmertPreTrainedModel,
+ LxmertVisualFeatureEncoder,
+ LxmertXLayer,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_lxmert import (
+ TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFLxmertForPreTraining,
+ TFLxmertMainLayer,
+ TFLxmertModel,
+ TFLxmertPreTrainedModel,
+ TFLxmertVisualFeatureEncoder,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a71d48ad54c323705cb7624540d3b754cda08390
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88d4811920f656e4bf5bff526d4f1b1615ee3531
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1c69a8aa84e79616c9e328adc3d65aa620eea84
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44d6fdc6b2ee8de31a9e6794403c5a0339c858bc
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9634ccee8d4545431998e304784485d1bd5649a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52515d99c45ee2df056317028b9ada460008e261
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..96545d67fc5c4d285634bc3eaa79086a1531d152
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py
new file mode 100644
index 0000000000000000000000000000000000000000..b79fb67908d27e8d36a19c30c3af296e2abe785a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py
@@ -0,0 +1,170 @@
+# coding=utf-8
+# Copyright 2018, Hao Tan, Mohit Bansal
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" LXMERT model configuration"""
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class LxmertConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is used
+ to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Instantiating
+ a configuration with the defaults will yield a similar configuration to that of the Lxmert
+ [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_qa_labels (`int`, *optional*, defaults to 9500):
+ This represents the total number of different question answering (QA) labels there are. If using more than
+ one dataset with QA, the user will need to account for the total number of labels that all of the datasets
+ have in total.
+ num_object_labels (`int`, *optional*, defaults to 1600):
+ This represents the total number of semantically unique objects that lxmert will be able to classify a
+ pooled-object feature as belonging to.
+ num_attr_labels (`int`, *optional*, defaults to 400):
+ This represents the total number of semantically unique attributes that lxmert will be able to classify a
+ pooled-object feature as possessing.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the *token_type_ids* passed into [`BertModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ l_layers (`int`, *optional*, defaults to 9):
+ Number of hidden layers in the Transformer language encoder.
+ x_layers (`int`, *optional*, defaults to 5):
+ Number of hidden layers in the Transformer cross modality encoder.
+ r_layers (`int`, *optional*, defaults to 5):
+ Number of hidden layers in the Transformer visual encoder.
+ visual_feat_dim (`int`, *optional*, defaults to 2048):
+ This represents the last dimension of the pooled-object features used as input for the model, representing
+ the size of each object feature itself.
+ visual_pos_dim (`int`, *optional*, defaults to 4):
+ This represents the number of spatial features that are mixed into the visual features. The default is set
+ to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width, height).
+ visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
+ This represents the scaling factor by which each visual loss is multiplied if, during pretraining, one
+ decided to train with multiple vision-based loss objectives.
+ task_matched (`bool`, *optional*, defaults to `True`):
+ This task is used for sentence-image matching. If the sentence correctly describes the image, the label
+ will be 1. If the sentence does not correctly describe the image, the label will be 0.
+ task_mask_lm (`bool`, *optional*, defaults to `True`):
+ Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
+ objective.
+ task_obj_predict (`bool`, *optional*, defaults to `True`):
+ Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.
+ task_qa (`bool`, *optional*, defaults to `True`):
+ Whether or not to add the question-answering loss to the objective.
+ visual_obj_loss (`bool`, *optional*, defaults to `True`):
+ Whether or not to calculate the object-prediction loss objective.
+ visual_attr_loss (`bool`, *optional*, defaults to `True`):
+ Whether or not to calculate the attribute-prediction loss objective.
+ visual_feat_loss (`bool`, *optional*, defaults to `True`):
+ Whether or not to calculate the feature-regression loss objective.
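+
+ Example (a minimal sketch of the usual configuration/model initialization pattern):
+
+ ```python
+ >>> from transformers import LxmertConfig, LxmertModel
+
+ >>> # Initializing a default LXMERT configuration
+ >>> configuration = LxmertConfig()
+
+ >>> # Initializing a model (with random weights) from that configuration
+ >>> model = LxmertModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```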
+ """
+
+ model_type = "lxmert"
+ attribute_map = {}
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_attention_heads=12,
+ num_qa_labels=9500,
+ num_object_labels=1600,
+ num_attr_labels=400,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ l_layers=9,
+ x_layers=5,
+ r_layers=5,
+ visual_feat_dim=2048,
+ visual_pos_dim=4,
+ visual_loss_normalizer=6.67,
+ task_matched=True,
+ task_mask_lm=True,
+ task_obj_predict=True,
+ task_qa=True,
+ visual_obj_loss=True,
+ visual_attr_loss=True,
+ visual_feat_loss=True,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.num_qa_labels = num_qa_labels
+ self.num_object_labels = num_object_labels
+ self.num_attr_labels = num_attr_labels
+ self.l_layers = l_layers
+ self.x_layers = x_layers
+ self.r_layers = r_layers
+ self.visual_feat_dim = visual_feat_dim
+ self.visual_pos_dim = visual_pos_dim
+ self.visual_loss_normalizer = visual_loss_normalizer
+ self.task_matched = task_matched
+ self.task_mask_lm = task_mask_lm
+ self.task_obj_predict = task_obj_predict
+ self.task_qa = task_qa
+ self.visual_obj_loss = visual_obj_loss
+ self.visual_attr_loss = visual_attr_loss
+ self.visual_feat_loss = visual_feat_loss
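+ # unlike most configs, `num_hidden_layers` is a dict mapping each LXMERT sub-encoder
+ # ("vision", "cross_encoder", "language") to its own depth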
+ self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
+ super().__init__(**kwargs)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8eb86f1d1e48a1459154b647fb2f4178df338b0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,60 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert LXMERT checkpoint."""
+
+
+import argparse
+
+import torch
+
+from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
+ # Initialise PyTorch model
+ config = LxmertConfig.from_json_file(config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ model = LxmertForPreTraining(config)
+
+ # Load weights from tf checkpoint
+ load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
+
+ # Save pytorch-model
+ print(f"Save PyTorch model to {pytorch_dump_path}")
+ torch.save(model.state_dict(), pytorch_dump_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
+ )
+ parser.add_argument(
+ "--config_file",
+ default=None,
+ type=str,
+ required=True,
+ help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
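+
+
+# Usage sketch (hypothetical paths):
+# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
+#     --tf_checkpoint_path /path/to/model.ckpt \
+#     --config_file /path/to/lxmert_config.json \
+#     --pytorch_dump_path /path/to/pytorch_model.bin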
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e2ae7d22e7cacccb7b8ab6213961c0a9c7ce68a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py
@@ -0,0 +1,1434 @@
+# coding=utf-8
+# Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch LXMERT model."""
+
+
+import math
+import os
+import warnings
+from dataclasses import dataclass
+from typing import Dict, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss, SmoothL1Loss
+
+from ...activations import ACT2FN, gelu
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_lxmert import LxmertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
+_CONFIG_FOR_DOC = "LxmertConfig"
+
+
+class GeLU(nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, x):
+ return gelu(x)
+
+
+@dataclass
+class LxmertModelOutput(ModelOutput):
+ """
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
+ visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"
+ encoder)
+
+
+ Args:
+ language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the language encoder.
+ vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
+ pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
+ by a Linear layer and a Tanh activation function.
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ language_output: Optional[torch.FloatTensor] = None
+ vision_output: Optional[torch.FloatTensor] = None
+ pooled_output: Optional[torch.FloatTensor] = None
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class LxmertForQuestionAnsweringOutput(ModelOutput):
+ """
+ Output type of [`LxmertForQuestionAnswering`].
+
+ Args:
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
+ (classification) loss.
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*):
+ Prediction scores of question answering objective (classification).
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ question_answering_score: Optional[torch.FloatTensor] = None
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class LxmertForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`LxmertForPreTraining`].
+
+ Args:
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
+ (classification) loss.
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`):
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
+ continuation before SoftMax).
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`):
+ Prediction scores of question answering objective (classification).
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ prediction_logits: Optional[torch.FloatTensor] = None
+ cross_relationship_score: Optional[torch.FloatTensor] = None
+ question_answering_score: Optional[torch.FloatTensor] = None
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path):
+ """Load tf checkpoints in a pytorch model."""
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array)
+
+ for name, array in zip(names, arrays):
+ name = name.split("/")
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+ # which are not required for using the pretrained model
+ if any(
+ n
+ in [
+ "adam_v",
+ "adam_m",
+ "AdamWeightDecayOptimizer",
+ "AdamWeightDecayOptimizer_1",
+ "global_step",
+ ]
+ for n in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ pointer = model
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+ scope_names = re.split(r"_(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "squad":
+ pointer = getattr(pointer, "classifier")
+ else:
+ try:
+ pointer = getattr(pointer, scope_names[0])
+ except AttributeError:
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+ if m_name[-11:] == "_embeddings":
+ pointer = getattr(pointer, "weight")
+ elif m_name == "kernel":
+ array = np.transpose(array)
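+ # TensorFlow stores dense kernels as (in_features, out_features); transpose to match
+ # PyTorch's nn.Linear weight layout of (out_features, in_features)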
+ try:
+ assert pointer.shape == array.shape
+ except AssertionError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+class LxmertEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ device = input_ids.device
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+ device = inputs_embeds.device
+ seq_length = input_shape[1]
+
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
+ position_ids = position_ids.unsqueeze(0).expand(input_shape)
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ position_embeddings = self.position_embeddings(position_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class LxmertAttention(nn.Module):
+ def __init__(self, config, ctx_dim=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.head_size = self.num_attention_heads * self.attention_head_size
+
+ # visual_dim = 2048
+ if ctx_dim is None:
+ ctx_dim = config.hidden_size
+ self.query = nn.Linear(config.hidden_size, self.head_size)
+ self.key = nn.Linear(ctx_dim, self.head_size)
+ self.value = nn.Linear(ctx_dim, self.head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
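+ # split the last dimension into (num_attention_heads, attention_head_size) and move the head axis
+ # forward: (batch, seq_len, head_size) -> (batch, num_heads, seq_len, attention_head_size)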
+ new_x_shape = x.size()[:-1] + (
+ self.num_attention_heads,
+ self.attention_head_size,
+ )
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
+ mixed_query_layer = self.query(hidden_states)
+ mixed_key_layer = self.key(context)
+ mixed_value_layer = self.value(context)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+ key_layer = self.transpose_for_scores(mixed_key_layer)
+ value_layer = self.transpose_for_scores(mixed_value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ # Apply the attention mask (precomputed for all layers in the LxmertModel forward() function)
+ if attention_mask is not None:
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+ return outputs
+
+
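+# Comment-only summary of the shape flow through LxmertAttention (names follow the code above):
+#
+#     hidden_states: (batch, seq_q, hidden)    context: (batch, seq_k, ctx_dim)
+#     query/key/value after transpose_for_scores: (batch, num_heads, seq, attention_head_size)
+#     attention_scores = Q @ K^T / sqrt(attention_head_size): (batch, num_heads, seq_q, seq_k)
+#     context_layer after the final view: (batch, seq_q, num_heads * attention_head_size)
+
+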
+class LxmertAttentionOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class LxmertCrossAttentionLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.att = LxmertAttention(config)
+ self.output = LxmertAttentionOutput(config)
+
+ def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
+ if output_attentions:
+ attention_probs = output[1]
+ attention_output = self.output(output[0], input_tensor)
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
+ return outputs
+
+
+class LxmertSelfAttentionLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = LxmertAttention(config)
+ self.output = LxmertAttentionOutput(config)
+
+ def forward(self, input_tensor, attention_mask, output_attentions=False):
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
+ output = self.self(
+ input_tensor,
+ input_tensor,
+ attention_mask,
+ output_attentions=output_attentions,
+ )
+ if output_attentions:
+ attention_probs = output[1]
+ attention_output = self.output(output[0], input_tensor)
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
+ return outputs
+
+
+class LxmertIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class LxmertOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class LxmertLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = LxmertSelfAttentionLayer(config)
+ self.intermediate = LxmertIntermediate(config)
+ self.output = LxmertOutput(config)
+
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
+ attention_output = outputs[0]
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ outputs = (layer_output,) + outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class LxmertXLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ # The cross-attention Layer
+ self.visual_attention = LxmertCrossAttentionLayer(config)
+
+ # Self-attention Layers
+ self.lang_self_att = LxmertSelfAttentionLayer(config)
+ self.visn_self_att = LxmertSelfAttentionLayer(config)
+
+ # Intermediate and Output Layers (FFNs)
+ self.lang_inter = LxmertIntermediate(config)
+ self.lang_output = LxmertOutput(config)
+ self.visn_inter = LxmertIntermediate(config)
+ self.visn_output = LxmertOutput(config)
+
+ def cross_att(
+ self,
+ lang_input,
+ lang_attention_mask,
+ visual_input,
+ visual_attention_mask,
+ output_x_attentions=False,
+ ):
+ # Cross Attention
+ lang_att_output = self.visual_attention(
+ lang_input,
+ visual_input,
+ ctx_att_mask=visual_attention_mask,
+ output_attentions=output_x_attentions,
+ )
+ visual_att_output = self.visual_attention(
+ visual_input,
+ lang_input,
+ ctx_att_mask=lang_attention_mask,
+ output_attentions=False,
+ )
+ return lang_att_output, visual_att_output
+
+ def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
+ # Self Attention
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
+ visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
+ return lang_att_output[0], visual_att_output[0]
+
+ def output_fc(self, lang_input, visual_input):
+ # FC layers
+ lang_inter_output = self.lang_inter(lang_input)
+ visual_inter_output = self.visn_inter(visual_input)
+
+ # Layer output
+ lang_output = self.lang_output(lang_inter_output, lang_input)
+ visual_output = self.visn_output(visual_inter_output, visual_input)
+
+ return lang_output, visual_output
+
+ def forward(
+ self,
+ lang_feats,
+ lang_attention_mask,
+ visual_feats,
+ visual_attention_mask,
+ output_attentions=False,
+ ):
+ lang_att_output, visual_att_output = self.cross_att(
+ lang_input=lang_feats,
+ lang_attention_mask=lang_attention_mask,
+ visual_input=visual_feats,
+ visual_attention_mask=visual_attention_mask,
+ output_x_attentions=output_attentions,
+ )
+ attention_probs = lang_att_output[1:]
+ lang_att_output, visual_att_output = self.self_att(
+ lang_att_output[0],
+ lang_attention_mask,
+ visual_att_output[0],
+ visual_attention_mask,
+ )
+
+ lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
+ return (
+ (
+ lang_output,
+ visual_output,
+ attention_probs[0],
+ )
+ if output_attentions
+ else (lang_output, visual_output)
+ )
+
+
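+# Comment-only sketch of the per-LxmertXLayer data flow:
+#
+#     lang', visn' = cross_att(lang, visn)           # each stream cross-attends to the other modality
+#     lang', visn' = self_att(lang', visn')          # each stream then self-attends within its modality
+#     lang_out, visn_out = output_fc(lang', visn')   # independent FFN + residual + LayerNorm per stream
+#
+# Only the language-to-vision cross-attention probabilities are returned when output_attentions=True.
+
+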
+class LxmertVisualFeatureEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ feat_dim = config.visual_feat_dim
+ pos_dim = config.visual_pos_dim
+
+ # Object feature encoding
+ self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
+ self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
+
+ # Box position encoding
+ self.box_fc = nn.Linear(pos_dim, config.hidden_size)
+ self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
+
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, visual_feats, visual_pos):
+ x = self.visn_fc(visual_feats)
+ x = self.visn_layer_norm(x)
+ y = self.box_fc(visual_pos)
+ y = self.box_layer_norm(y)
+ output = (x + y) / 2
+
+ output = self.dropout(output)
+ return output
+
+
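+# Comment-only sketch of the inputs LxmertVisualFeatureEncoder expects (the box count 36 is an
+# illustrative assumption):
+#
+#     visual_feats: (batch, 36, config.visual_feat_dim)   # ROI-pooled detector features
+#     visual_pos:   (batch, 36, config.visual_pos_dim)    # normalized box coordinates in [0, 1]
+#     output = dropout((LayerNorm(visn_fc(feats)) + LayerNorm(box_fc(pos))) / 2)   # (batch, 36, hidden_size)
+
+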
+class LxmertEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ # Obj-level image embedding layer
+ self.visn_fc = LxmertVisualFeatureEncoder(config)
+ self.config = config
+
+ # Number of layers
+ self.num_l_layers = config.l_layers
+ self.num_x_layers = config.x_layers
+ self.num_r_layers = config.r_layers
+
+ # Layers
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
+ self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
+ self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
+ self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])
+
+ def forward(
+ self,
+ lang_feats,
+ lang_attention_mask,
+ visual_feats,
+ visual_pos,
+ visual_attention_mask=None,
+ output_attentions=None,
+ ):
+ vision_hidden_states = ()
+ language_hidden_states = ()
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
+ language_attentions = () if output_attentions or self.config.output_attentions else None
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
+
+ visual_feats = self.visn_fc(visual_feats, visual_pos)
+
+ # Run language layers
+ for layer_module in self.layer:
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
+ lang_feats = l_outputs[0]
+ language_hidden_states = language_hidden_states + (lang_feats,)
+ if language_attentions is not None:
+ language_attentions = language_attentions + (l_outputs[1],)
+
+ # Run relational layers
+ for layer_module in self.r_layers:
+ v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
+ visual_feats = v_outputs[0]
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
+ if vision_attentions is not None:
+ vision_attentions = vision_attentions + (v_outputs[1],)
+
+ # Run cross-modality layers
+ for layer_module in self.x_layers:
+ x_outputs = layer_module(
+ lang_feats,
+ lang_attention_mask,
+ visual_feats,
+ visual_attention_mask,
+ output_attentions=output_attentions,
+ )
+ lang_feats, visual_feats = x_outputs[:2]
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
+ language_hidden_states = language_hidden_states + (lang_feats,)
+ if cross_encoder_attentions is not None:
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
+ visual_encoder_outputs = (
+ vision_hidden_states,
+ vision_attentions if output_attentions else None,
+ )
+ lang_encoder_outputs = (
+ language_hidden_states,
+ language_attentions if output_attentions else None,
+ )
+ return (
+ visual_encoder_outputs,
+ lang_encoder_outputs,
+ cross_encoder_attentions if output_attentions else None,
+ )
+
+
+class LxmertPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class LxmertPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+class LxmertLMPredictionHead(nn.Module):
+ def __init__(self, config, lxmert_model_embedding_weights):
+ super().__init__()
+ self.transform = LxmertPredictionHeadTransform(config)
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(
+ lxmert_model_embedding_weights.size(1),
+ lxmert_model_embedding_weights.size(0),
+ bias=False,
+ )
+ self.decoder.weight = lxmert_model_embedding_weights
+ self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states) + self.bias
+ return hidden_states
+
+
+class LxmertVisualAnswerHead(nn.Module):
+ def __init__(self, config, num_labels):
+ super().__init__()
+ hid_dim = config.hidden_size
+ self.logit_fc = nn.Sequential(
+ nn.Linear(hid_dim, hid_dim * 2),
+ GeLU(),
+ nn.LayerNorm(hid_dim * 2, eps=1e-12),
+ nn.Linear(hid_dim * 2, num_labels),
+ )
+
+ def forward(self, hidden_states):
+ return self.logit_fc(hidden_states)
+
+
+class LxmertVisualObjHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = LxmertPredictionHeadTransform(config)
+ # Decide the use of visual losses
+ visual_losses = {}
+ if config.visual_obj_loss:
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
+ if config.visual_attr_loss:
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
+ if config.visual_feat_loss:
+ visual_losses["feat"] = {
+ "shape": (-1, config.visual_feat_dim),
+ "num": config.visual_feat_dim,
+ }
+ self.visual_losses = visual_losses
+
+ # One decoder head per enabled visual loss, mapping the transformed hidden states
+ # to that loss's output dimension.
+ self.decoder_dict = nn.ModuleDict(
+ {key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
+ )
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ output = {}
+ for key in self.visual_losses:
+ output[key] = self.decoder_dict[key](hidden_states)
+ return output
+
+
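+# LxmertVisualObjHead returns one logits tensor per enabled visual loss, keyed by name. A comment-only
+# sketch (which keys exist depends on the config flags):
+#
+#     {"obj":  (batch, num_boxes, config.num_object_labels),
+#      "attr": (batch, num_boxes, config.num_attr_labels),
+#      "feat": (batch, num_boxes, config.visual_feat_dim)}
+
+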
+class LxmertPreTrainingHeads(nn.Module):
+ def __init__(self, config, lxmert_model_embedding_weights):
+ super().__init__()
+ self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, sequence_output, pooled_output):
+ prediction_scores = self.predictions(sequence_output)
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return prediction_scores, seq_relationship_score
+
+
+class LxmertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LxmertConfig
+ load_tf_weights = load_tf_weights_in_lxmert
+ base_model_prefix = "lxmert"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+LXMERT_START_DOCSTRING = r"""
+
+ The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
+ Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
+ model, pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual
+ Genome, using a combination of masked language modeling, region-of-interest feature regression, a cross-entropy loss
+ for question answering, attribute prediction, and object tag prediction.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+LXMERT_INPUTS_DOCSTRING = r"""
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
+ This input represents visual features: ROI-pooled object features extracted from bounding boxes by a
+ Faster R-CNN model.
+
+ These are currently not provided by the transformers library.
+ visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
+ This input represents spatial features corresponding to their relative (via index) visual features. The
+ pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
+ 1.
+
+ These are currently not provided by the transformers library.
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ visual_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
+ LXMERT_START_DOCSTRING,
+)
+class LxmertModel(LxmertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.embeddings = LxmertEmbeddings(config)
+ self.encoder = LxmertEncoder(config)
+ self.pooler = LxmertPooler(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, new_embeddings):
+ self.embeddings.word_embeddings = new_embeddings
+
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=LxmertModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ visual_feats: Optional[torch.FloatTensor] = None,
+ visual_pos: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[LxmertModelOutput, Tuple[torch.FloatTensor]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if visual_feats is None:
+ raise ValueError("`visual_feats` cannot be `None`")
+ if visual_pos is None:
+ raise ValueError("`visual_pos` cannot be `None`")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is more simple than the triangular masking of causal attention
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and the dtype's smallest value for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
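+ # For example (comment-only illustration): a 2D mask [[1, 1, 0]] becomes [[[[0., 0., min]]]],
+ # where `min` is torch.finfo(self.dtype).min, so masked positions get a large negative bias
+ # that drives their softmax weight to ~0.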
+
+ # Process the visual attention mask
+ if visual_attention_mask is not None:
+ extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
+ extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype)
+ extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * torch.finfo(self.dtype).min
+ else:
+ extended_visual_attention_mask = None
+
+ # Positional Word Embeddings
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds)
+
+ # Run Lxmert encoder
+ encoder_outputs = self.encoder(
+ embedding_output,
+ extended_attention_mask,
+ visual_feats=visual_feats,
+ visual_pos=visual_pos,
+ visual_attention_mask=extended_visual_attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
+ vision_hidden_states = visual_encoder_outputs[0]
+ language_hidden_states = lang_encoder_outputs[0]
+
+ all_attentions = ()
+ if output_attentions:
+ language_attentions = lang_encoder_outputs[1]
+ vision_attentions = visual_encoder_outputs[1]
+ cross_encoder_attentions = encoder_outputs[2]
+ all_attentions = (
+ language_attentions,
+ vision_attentions,
+ cross_encoder_attentions,
+ )
+
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
+
+ visual_output = vision_hidden_states[-1]
+ lang_output = language_hidden_states[-1]
+ pooled_output = self.pooler(lang_output)
+
+ if not return_dict:
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
+
+ return LxmertModelOutput(
+ pooled_output=pooled_output,
+ language_output=lang_output,
+ vision_output=visual_output,
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
+ language_attentions=language_attentions if output_attentions else None,
+ vision_attentions=vision_attentions if output_attentions else None,
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
+ )
+
+
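+# A hedged end-to-end usage sketch for LxmertModel, kept as a comment. The random visual features are an
+# illustrative assumption; real visual_feats/visual_pos must come from an external Faster R-CNN detector,
+# which the transformers library does not provide:
+#
+#     import torch
+#     from transformers import AutoTokenizer, LxmertModel
+#
+#     tokenizer = AutoTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
+#     model = LxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
+#     inputs = tokenizer("Who is eating the apple?", return_tensors="pt")
+#     visual_feats = torch.rand(1, 36, model.config.visual_feat_dim)   # (batch, num_boxes, visual_feat_dim)
+#     visual_pos = torch.rand(1, 36, model.config.visual_pos_dim)      # normalized boxes in [0, 1]
+#     outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
+#     # outputs.language_output, outputs.vision_output, outputs.pooled_output
+
+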
+@add_start_docstrings(
+ """Lxmert Model with a specified pretraining head on top.""",
+ LXMERT_START_DOCSTRING,
+)
+class LxmertForPreTraining(LxmertPreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ # Configuration
+ self.config = config
+ self.num_qa_labels = config.num_qa_labels
+ self.visual_loss_normalizer = config.visual_loss_normalizer
+
+ # Use of pretraining tasks
+ self.task_mask_lm = config.task_mask_lm
+ self.task_obj_predict = config.task_obj_predict
+ self.task_matched = config.task_matched
+ self.task_qa = config.task_qa
+
+ # Lxmert backbone
+ self.lxmert = LxmertModel(config)
+
+ # Pre-training heads
+ self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight)
+ if self.task_obj_predict:
+ self.obj_predict_head = LxmertVisualObjHead(config)
+ if self.task_qa:
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
+
+ # Weight initialization
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Loss functions
+ self.loss_fcts = {
+ "l2": SmoothL1Loss(reduction="none"),
+ "visual_ce": CrossEntropyLoss(reduction="none"),
+ "ce": CrossEntropyLoss(),
+ }
+
+ visual_losses = {}
+ if config.visual_obj_loss:
+ visual_losses["obj"] = {
+ "shape": (-1,),
+ "num": config.num_object_labels,
+ "loss": "visual_ce",
+ }
+ if config.visual_attr_loss:
+ visual_losses["attr"] = {
+ "shape": (-1,),
+ "num": config.num_attr_labels,
+ "loss": "visual_ce",
+ }
+ if config.visual_feat_loss:
+ visual_losses["feat"] = {
+ "shape": (-1, config.visual_feat_dim),
+ "num": config.visual_feat_dim,
+ "loss": "l2",
+ }
+ self.visual_losses = visual_losses
+
+ def resize_num_qa_labels(self, num_labels):
+ """
+ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
+ will add newly initialized weights. Reducing the size will remove weights from the end
+
+ Args:
+ num_labels (`int`, *optional*):
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
+ returns a pointer to the qa labels `torch.nn.Linear` module of the model without doing anything.
+
+ Return:
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
+ """
+
+ cur_qa_logit_layer = self.get_qa_logit_layer()
+ if num_labels is None or cur_qa_logit_layer is None:
+ return
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
+ self.config.num_qa_labels = num_labels
+ self.num_qa_labels = num_labels
+
+ return new_qa_logit_layer
+
+ def _resize_qa_labels(self, num_labels):
+ cur_qa_logit_layer = self.get_qa_logit_layer()
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
+ self._set_qa_logit_layer(new_qa_logit_layer)
+ return self.get_qa_logit_layer()
+
+ def get_qa_logit_layer(self) -> nn.Module:
+ """
+ Returns the linear layer that produces question answering logits.
+
+ Returns:
+ `nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT
+ does not have a visual answering head.
+ """
+ if hasattr(self, "answer_head"):
+ return self.answer_head.logit_fc[-1]
+
+ def _set_qa_logit_layer(self, qa_logit_layer):
+ self.answer_head.logit_fc[-1] = qa_logit_layer
+
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
+ if num_labels is None:
+ return cur_qa_logit_layer
+
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
+ if cur_qa_labels == num_labels:
+ return cur_qa_logit_layer
+
+ # Build new linear output
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
+ else:
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
+
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
+
+ # initialize all new labels
+ self._init_weights(new_qa_logit_layer)
+
+ # Copy labels from the previous weights
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
+
+ return new_qa_logit_layer
+
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ visual_feats: Optional[torch.FloatTensor] = None,
+ visual_pos: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ obj_labels: Optional[Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]] = None,
+ matched_label: Optional[torch.LongTensor] = None,
+ ans: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[LxmertForPreTrainingOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ obj_labels (`Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]`, *optional*):
+ each key is named after each one of the visual losses and each element of the tuple is of the shape
+ `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label id and
+ the label score respectively
+ matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing whether or not the text input matches the image (classification) loss. Input
+ should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates that the sentence does not match the image,
+ - 1 indicates that the sentence does match the image.
+ ans (`torch.Tensor` of shape `(batch_size)`, *optional*):
+ A one-hot representation of the correct answer
+
+ Returns:
+ """
+
+ if "masked_lm_labels" in kwargs:
+ warnings.warn(
+ "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels`"
+ " instead.",
+ FutureWarning,
+ )
+ labels = kwargs.pop("masked_lm_labels")
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ lxmert_output = self.lxmert(
+ input_ids=input_ids,
+ visual_feats=visual_feats,
+ visual_pos=visual_pos,
+ token_type_ids=token_type_ids,
+ attention_mask=attention_mask,
+ visual_attention_mask=visual_attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+
+ lang_output, visual_output, pooled_output = (
+ lxmert_output[0],
+ lxmert_output[1],
+ lxmert_output[2],
+ )
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
+ if self.task_qa:
+ answer_score = self.answer_head(pooled_output)
+ else:
+ answer_score = pooled_output[0][0]
+
+ total_loss = (
+ None
+ if (labels is None and matched_label is None and obj_labels is None and ans is None)
+ else torch.tensor(0.0, device=device)
+ )
+ if labels is not None and self.task_mask_lm:
+ masked_lm_loss = self.loss_fcts["ce"](
+ lang_prediction_scores.view(-1, self.config.vocab_size),
+ labels.view(-1),
+ )
+ total_loss += masked_lm_loss
+ if matched_label is not None and self.task_matched:
+ matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
+ total_loss += matched_loss
+ if obj_labels is not None and self.task_obj_predict:
+ total_visual_loss = torch.tensor(0.0, device=device)
+ visual_prediction_scores_dict = self.obj_predict_head(visual_output)
+ for key, key_info in self.visual_losses.items():
+ label, mask_conf = obj_labels[key]
+ output_dim = key_info["num"]
+ loss_fct_name = key_info["loss"]
+ label_shape = key_info["shape"]
+ weight = self.visual_loss_normalizer
+ visual_loss_fct = self.loss_fcts[loss_fct_name]
+ visual_prediction_scores = visual_prediction_scores_dict[key]
+ visual_loss = visual_loss_fct(
+ visual_prediction_scores.view(-1, output_dim),
+ label.view(label_shape),
+ )
+ if visual_loss.dim() > 1: # Regression Losses
+ visual_loss = visual_loss.mean(1)
+ visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
+ total_visual_loss += visual_loss
+ total_loss += total_visual_loss
+ if ans is not None and self.task_qa:
+ answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
+ total_loss += answer_loss
+
+ if not return_dict:
+ output = (
+ lang_prediction_scores,
+ cross_relationship_score,
+ answer_score,
+ ) + lxmert_output[3:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return LxmertForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=lang_prediction_scores,
+ cross_relationship_score=cross_relationship_score,
+ question_answering_score=answer_score,
+ language_hidden_states=lxmert_output.language_hidden_states,
+ vision_hidden_states=lxmert_output.vision_hidden_states,
+ language_attentions=lxmert_output.language_attentions,
+ vision_attentions=lxmert_output.vision_attentions,
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
+ )
+
+
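+# Comment-only sketch of resizing the QA head of LxmertForPreTraining; the label count 3129 (a common
+# VQA answer-vocabulary size) is an illustrative assumption:
+#
+#     model = LxmertForPreTraining.from_pretrained("unc-nlp/lxmert-base-uncased")
+#     model.resize_num_qa_labels(3129)   # rebuilds answer_head.logit_fc[-1], copying the overlapping rows
+#     model.config.num_qa_labels         # -> 3129
+
+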
+@add_start_docstrings(
+ """Lxmert Model with a visual-answering head on top for downstream QA tasks""",
+ LXMERT_START_DOCSTRING,
+)
+class LxmertForQuestionAnswering(LxmertPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ # Configuration
+ self.config = config
+ self.num_qa_labels = config.num_qa_labels
+ self.visual_loss_normalizer = config.visual_loss_normalizer
+
+ # Lxmert backbone
+ self.lxmert = LxmertModel(config)
+
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
+
+ # Weight initialization
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Loss function
+ self.loss = CrossEntropyLoss()
+
+ def resize_num_qa_labels(self, num_labels):
+ """
+ Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
+ will add newly initialized weights. Reducing the size will remove weights from the end
+
+ Args:
+ num_labels (`int`, *optional*):
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
+ returns a pointer to the qa labels `torch.nn.Linear` module of the model without doing anything.
+
+ Return:
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
+ """
+
+ cur_qa_logit_layer = self.get_qa_logit_layer()
+ if num_labels is None or cur_qa_logit_layer is None:
+ return
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
+ self.config.num_qa_labels = num_labels
+ self.num_qa_labels = num_labels
+
+ return new_qa_logit_layer
+
+ def _resize_qa_labels(self, num_labels):
+ cur_qa_logit_layer = self.get_qa_logit_layer()
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
+ self._set_qa_logit_layer(new_qa_logit_layer)
+ return self.get_qa_logit_layer()
+
+ def get_qa_logit_layer(self) -> nn.Module:
+ """
+ Returns the linear layer that produces question answering logits.
+
+ Returns:
+ `nn.Module`: A torch module mapping the question answering prediction hidden states, or `None` if LXMERT
+ does not have a visual answering head.
+ """
+
+ if hasattr(self, "answer_head"):
+ return self.answer_head.logit_fc[-1]
+
+ def _set_qa_logit_layer(self, qa_logit_layer):
+ self.answer_head.logit_fc[-1] = qa_logit_layer
+
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
+ if num_labels is None:
+ return cur_qa_logit_layer
+
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
+ if cur_qa_labels == num_labels:
+ return cur_qa_logit_layer
+
+ # Build new linear output
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
+ else:
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
+
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
+
+ # initialize all new labels
+ self._init_weights(new_qa_logit_layer)
+
+ # Copy labels from the previous weights
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
+
+ return new_qa_logit_layer
+
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=LxmertForQuestionAnsweringOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ visual_feats: Optional[torch.FloatTensor] = None,
+ visual_pos: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[LxmertForQuestionAnsweringOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.Tensor` of shape `(batch_size)`, *optional*):
+ A one-hot representation of the correct answer
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ lxmert_output = self.lxmert(
+ input_ids=input_ids,
+ visual_feats=visual_feats,
+ visual_pos=visual_pos,
+ token_type_ids=token_type_ids,
+ attention_mask=attention_mask,
+ visual_attention_mask=visual_attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+
+ pooled_output = lxmert_output[2]
+ answer_score = self.answer_head(pooled_output)
+ loss = None
+ if labels is not None:
+ loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (answer_score,) + lxmert_output[3:]
+ return (loss,) + output if loss is not None else output
+
+ return LxmertForQuestionAnsweringOutput(
+ loss=loss,
+ question_answering_score=answer_score,
+ language_hidden_states=lxmert_output.language_hidden_states,
+ vision_hidden_states=lxmert_output.vision_hidden_states,
+ language_attentions=lxmert_output.language_attentions,
+ vision_attentions=lxmert_output.vision_attentions,
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4741196031a793c2e58e6c03392b3b52d76fc79
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py
@@ -0,0 +1,1656 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the
+# Lxmert Authors.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 LXMERT model."""
+
+
+from __future__ import annotations
+
+import warnings
+from dataclasses import dataclass
+from typing import Dict, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_utils import (
+ TFModelInputType,
+ TFPreTrainedModel,
+ get_initializer,
+ keras,
+ keras_serializable,
+ shape_list,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_lxmert import LxmertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
+_CONFIG_FOR_DOC = "LxmertConfig"
+
+
+from ..deprecated._archive_maps import TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class TFLxmertModelOutput(ModelOutput):
+ """
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
+ visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"
+ encoder)
+
+
+ Args:
+ language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the language encoder.
+ vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
+ pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
+ by a Linear layer and a Tanh activation function.
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ language_output: tf.Tensor | None = None
+ vision_output: tf.Tensor | None = None
+ pooled_output: tf.Tensor | None = None
+ language_hidden_states: Tuple[tf.Tensor] | None = None
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
+ language_attentions: Tuple[tf.Tensor] | None = None
+ vision_attentions: Tuple[tf.Tensor] | None = None
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
+
+
+@dataclass
+class TFLxmertForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`LxmertForPreTraining`].
+
+ Args:
+ loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
+ (classification) loss.
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ cross_relationship_score (`tf.Tensor` of shape `(batch_size, 2)`):
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
+ continuation before SoftMax).
+ question_answering_score (`tf.Tensor` of shape `(batch_size, n_qa_answers)`):
+ Prediction scores of question answering objective (classification).
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+
+ """
+
+ loss: tf.Tensor | None = None
+ prediction_logits: tf.Tensor | None = None
+ cross_relationship_score: tf.Tensor | None = None
+ question_answering_score: tf.Tensor | None = None
+ language_hidden_states: Tuple[tf.Tensor] | None = None
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
+ language_attentions: Tuple[tf.Tensor] | None = None
+ vision_attentions: Tuple[tf.Tensor] | None = None
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
+
+
+class TFLxmertVisualFeatureEncoder(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ # Object feature encoding
+ self.visn_fc = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="visn_fc",
+ )
+ self.visn_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="visn_layer_norm")
+
+ # Box position encoding
+ self.box_fc = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="box_fc",
+ )
+ self.box_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm")
+
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.feat_dim = config.visual_feat_dim
+ self.pos_dim = config.visual_pos_dim
+ self.config = config
+
+ def call(self, visn_input, training=False):
+ feats, boxes = visn_input
+
+ x = self.visn_fc(feats)
+ x = self.visn_layer_norm(x)
+ y = self.box_fc(boxes)
+ y = self.box_layer_norm(y)
+ output = (x + y) / 2
+
+ output = self.dropout(output, training=training)
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "visn_fc", None) is not None:
+ with tf.name_scope(self.visn_fc.name):
+ self.visn_fc.build([None, None, self.feat_dim])
+ if getattr(self, "visn_layer_norm", None) is not None:
+ with tf.name_scope(self.visn_layer_norm.name):
+ self.visn_layer_norm.build([None, None, self.config.hidden_size])
+ if getattr(self, "box_fc", None) is not None:
+ with tf.name_scope(self.box_fc.name):
+ self.box_fc.build([None, None, self.pos_dim])
+ if getattr(self, "box_layer_norm", None) is not None:
+ with tf.name_scope(self.box_layer_norm.name):
+ self.box_layer_norm.build([None, None, self.config.hidden_size])
+
+
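+# Comment-only sketch of calling TFLxmertVisualFeatureEncoder: unlike the PyTorch module, it takes a single
+# (feats, boxes) tuple (the box count 36 is an illustrative assumption):
+#
+#     feats = tf.random.uniform((1, 36, config.visual_feat_dim))
+#     boxes = tf.random.uniform((1, 36, config.visual_pos_dim))
+#     output = visn_fc((feats, boxes))   # -> (1, 36, config.hidden_size)
+
+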
+class TFLxmertEmbeddings(keras.layers.Layer):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.initializer_range = config.initializer_range
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.hidden_size],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ with tf.name_scope("token_type_embeddings"):
+ self.token_type_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.config.type_vocab_size, self.hidden_size],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.hidden_size],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+ def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+class TFLxmertAttention(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads}"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ assert config.hidden_size % config.num_attention_heads == 0
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = keras.layers.Dense(
+ self.all_head_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="query",
+ )
+ self.key = keras.layers.Dense(
+ self.all_head_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="key",
+ )
+ self.value = keras.layers.Dense(
+ self.all_head_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="value",
+ )
+
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
+ self.ctx_dim = config.hidden_size
+ self.config = config
+
+ def transpose_for_scores(self, x, batch_size):
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
+ return tf.transpose(x, perm=[0, 2, 1, 3])
+
+ def call(self, hidden_states, context, attention_mask, output_attentions, training=False):
+ batch_size = shape_list(hidden_states)[0]
+ mixed_query_layer = self.query(hidden_states)
+ mixed_key_layer = self.key(context)
+ mixed_value_layer = self.value(context)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = tf.matmul(
+ query_layer, key_layer, transpose_b=True
+ ) # (batch size, num_heads, seq_len_q, seq_len_k)
+ dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores
+ attention_scores = attention_scores / tf.math.sqrt(dk)
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the TFLxmertModel call() function)
+ attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs, training=training)
+ context_layer = tf.matmul(attention_probs, value_layer)
+
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
+ context_layer = tf.reshape(
+ context_layer, (batch_size, -1, self.all_head_size)
+ ) # (batch_size, seq_len_q, all_head_size)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.ctx_dim])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.ctx_dim])
+
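+# A minimal sketch of how the attention layer above can be exercised in isolation. It assumes a
+# default `LxmertConfig()`; the helper name `_attention_layer_sketch` and all tensor sizes are
+# illustrative only. Note that the attention mask is *additive*: 0.0 for visible positions and a
+# large negative value for masked ones, matching the convention used in `call` above.
+def _attention_layer_sketch():
+    config = LxmertConfig()
+    layer = TFLxmertAttention(config, name="attention_sketch")
+    hidden_states = tf.random.uniform((2, 5, config.hidden_size))  # e.g. the language stream
+    context = tf.random.uniform((2, 7, config.hidden_size))        # e.g. the visual stream
+    additive_mask = tf.zeros((2, 1, 1, 7))                         # nothing masked out
+    context_layer, attention_probs = layer(
+        hidden_states, context, additive_mask, output_attentions=True, training=False
+    )
+    return context_layer, attention_probs  # shapes: (2, 5, hidden_size) and (2, num_heads, 5, 7)
+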
+
+class TFLxmertIntermediate(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(
+ config.intermediate_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="dense",
+ )
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFLxmertOutput(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="dense",
+ )
+
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states, input_tensor, training=False):
+ hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+class TFLxmertAttentionOutput(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="dense",
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states, input_tensor, training=False):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+class TFLxmertSelfAttentionLayer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.self = TFLxmertAttention(config, name="self")
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
+
+ def call(self, input_tensor, attention_mask, output_attentions, training=False):
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
+        self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions, training=training)
+ if output_attentions:
+ attention_probs = self_output[1]
+        attention_output = self.attention_output(self_output[0], input_tensor, training=training)
+ return (attention_output, attention_probs) if output_attentions else (attention_output,)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self", None) is not None:
+ with tf.name_scope(self.self.name):
+ self.self.build(None)
+ if getattr(self, "attention_output", None) is not None:
+ with tf.name_scope(self.attention_output.name):
+ self.attention_output.build(None)
+
+
+class TFLxmertCrossAttentionLayer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.att = TFLxmertAttention(config, name="att")
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
+
+ def call(
+ self,
+ input_tensor,
+ ctx_tensor,
+ ctx_att_mask,
+ output_attentions=False,
+ training=False,
+ ):
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training)
+ if output_attentions:
+ attention_probs = output[1]
+ attention_output = self.attention_output(output[0], input_tensor, training=training)
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "att", None) is not None:
+ with tf.name_scope(self.att.name):
+ self.att.build(None)
+ if getattr(self, "attention_output", None) is not None:
+ with tf.name_scope(self.attention_output.name):
+ self.attention_output.build(None)
+
+
+class TFLxmertLayer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.attention = TFLxmertSelfAttentionLayer(config, name="attention")
+ self.intermediate = TFLxmertIntermediate(config, name="intermediate")
+ self.transformer_output = TFLxmertOutput(config, name="output")
+
+ def call(self, hidden_states, attention_mask, output_attentions, training=False):
+ attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training)
+ attention_output = attention_outputs[0]
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.transformer_output(intermediate_output, attention_output, training=training)
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "transformer_output", None) is not None:
+ with tf.name_scope(self.transformer_output.name):
+ self.transformer_output.build(None)
+
+
+class TFLxmertXLayer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention")
+
+ # Self-attention Layers
+ self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att")
+ self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att")
+
+ # Intermediate and Output Layers (FFNs)
+ self.lang_inter = TFLxmertIntermediate(config, name="lang_inter")
+ self.lang_output = TFLxmertOutput(config, name="lang_output")
+ self.visn_inter = TFLxmertIntermediate(config, name="visn_inter")
+ self.visn_output = TFLxmertOutput(config, name="visn_output")
+
+ def cross_att(
+ self,
+ lang_input,
+ lang_attention_mask,
+ visn_input,
+ visn_attention_mask,
+ output_attentions,
+ training=False,
+ ):
+ # Cross Attention
+
+        # Keras model saving and loading *does not work* when two layers share the exact same input tensor,
+        # so distinct tf.identity copies of each input are passed to the two cross-attention calls below.
+ lang_attention_lang_input = tf.identity(lang_input)
+ visn_attention_lang_input = tf.identity(lang_input)
+ lang_attention_visn_input = tf.identity(visn_input)
+ visn_attention_visn_input = tf.identity(visn_input)
+
+ lang_att_output = self.visual_attention(
+ lang_attention_lang_input,
+ lang_attention_visn_input,
+ visn_attention_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ visn_att_output = self.visual_attention(
+ visn_attention_visn_input,
+ visn_attention_lang_input,
+ lang_attention_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ return lang_att_output, visn_att_output
+
+ def self_att(
+ self,
+ lang_input,
+ lang_attention_mask,
+ visn_input,
+ visn_attention_mask,
+ training=False,
+ ):
+ # Self Attention
+ output_attentions = False
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training)
+ visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training)
+ return lang_att_output[0], visn_att_output[0]
+
+ def output_fc(self, lang_input, visn_input, training=False):
+ # FC layers
+ lang_inter_output = self.lang_inter(lang_input)
+ visn_inter_output = self.visn_inter(visn_input)
+
+ # Layer output
+        lang_output = self.lang_output(lang_inter_output, lang_input, training=training)
+        visn_output = self.visn_output(visn_inter_output, visn_input, training=training)
+ return lang_output, visn_output
+
+ def call(
+ self,
+ lang_feats,
+ lang_attention_mask,
+ visn_feats,
+ visn_attention_mask,
+ output_attentions,
+ training=False,
+ ):
+ lang_att_output = lang_feats
+ visn_att_output = visn_feats
+
+ lang_att_output, visn_att_output = self.cross_att(
+ lang_att_output,
+ lang_attention_mask,
+ visn_att_output,
+ visn_attention_mask,
+ output_attentions,
+ training=training,
+ )
+ attention_probs = lang_att_output[1:]
+ lang_att_output, visn_att_output = self.self_att(
+ lang_att_output[0],
+ lang_attention_mask,
+ visn_att_output[0],
+ visn_attention_mask,
+ training=training,
+ )
+ lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training)
+
+ return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "visual_attention", None) is not None:
+ with tf.name_scope(self.visual_attention.name):
+ self.visual_attention.build(None)
+ if getattr(self, "lang_self_att", None) is not None:
+ with tf.name_scope(self.lang_self_att.name):
+ self.lang_self_att.build(None)
+ if getattr(self, "visn_self_att", None) is not None:
+ with tf.name_scope(self.visn_self_att.name):
+ self.visn_self_att.build(None)
+ if getattr(self, "lang_inter", None) is not None:
+ with tf.name_scope(self.lang_inter.name):
+ self.lang_inter.build(None)
+ if getattr(self, "lang_output", None) is not None:
+ with tf.name_scope(self.lang_output.name):
+ self.lang_output.build(None)
+ if getattr(self, "visn_inter", None) is not None:
+ with tf.name_scope(self.visn_inter.name):
+ self.visn_inter.build(None)
+ if getattr(self, "visn_output", None) is not None:
+ with tf.name_scope(self.visn_output.name):
+ self.visn_output.build(None)
+
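+# A minimal sketch of a single cross-modality block: cross-attention between the two streams,
+# then per-stream self-attention, then per-stream feed-forward layers, as implemented by
+# TFLxmertXLayer above. The helper name and tensor sizes are illustrative; masks may be `None`,
+# in which case no positions are masked out.
+def _cross_modality_layer_sketch():
+    config = LxmertConfig()
+    x_layer = TFLxmertXLayer(config, name="x_layer_sketch")
+    lang_feats = tf.random.uniform((2, 5, config.hidden_size))
+    visn_feats = tf.random.uniform((2, 10, config.hidden_size))
+    lang_output, visn_output = x_layer(
+        lang_feats,
+        None,  # lang_attention_mask
+        visn_feats,
+        None,  # visn_attention_mask
+        output_attentions=False,
+        training=False,
+    )
+    return lang_output, visn_output  # both streams keep their input shapes
+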
+
+class TFLxmertEncoder(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc")
+
+ # Number of layers
+ self.num_l_layers = config.l_layers
+ self.num_x_layers = config.x_layers
+ self.num_r_layers = config.r_layers
+
+ # Layers
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
+ self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)]
+ self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)]
+ self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)]
+ self.config = config
+
+ def call(
+ self,
+ lang_feats=None,
+ lang_attention_mask=None,
+ visual_feats=None,
+ visual_pos=None,
+ visual_attention_mask=None,
+ output_attentions=None,
+ training=False,
+ ):
+ vision_hidden_states = ()
+ language_hidden_states = ()
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
+ language_attentions = () if output_attentions or self.config.output_attentions else None
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
+
+ visual_feats = self.visn_fc([visual_feats, visual_pos], training=training)
+
+ # Run language layers
+ for layer_module in self.layer:
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training)
+ lang_feats = l_outputs[0]
+ language_hidden_states = language_hidden_states + (lang_feats,)
+ if language_attentions is not None:
+ language_attentions = language_attentions + (l_outputs[1],)
+
+ # Run relational layers
+ for layer_module in self.r_layers:
+ v_outputs = layer_module(
+ visual_feats,
+ visual_attention_mask,
+ output_attentions,
+ training=training,
+ )
+ visual_feats = v_outputs[0]
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
+ if vision_attentions is not None:
+ vision_attentions = vision_attentions + (v_outputs[1],)
+
+ # Run cross-modality layers
+ for layer_module in self.x_layers:
+ x_outputs = layer_module(
+ lang_feats,
+ lang_attention_mask,
+ visual_feats,
+ visual_attention_mask,
+ output_attentions,
+ training=training,
+ )
+ lang_feats, visual_feats = x_outputs[:2]
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
+ language_hidden_states = language_hidden_states + (lang_feats,)
+ if cross_encoder_attentions is not None:
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
+
+ visual_encoder_outputs = (
+ vision_hidden_states,
+ vision_attentions if output_attentions else None,
+ )
+ lang_encoder_outputs = (
+ language_hidden_states,
+ language_attentions if output_attentions else None,
+ )
+
+ return (
+ visual_encoder_outputs,
+ lang_encoder_outputs,
+ cross_encoder_attentions if output_attentions else None,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "visn_fc", None) is not None:
+ with tf.name_scope(self.visn_fc.name):
+ self.visn_fc.build(None)
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+ if getattr(self, "x_layers", None) is not None:
+ for layer in self.x_layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+ if getattr(self, "r_layers", None) is not None:
+ for layer in self.r_layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
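+# A minimal sketch of the full encoder stack above: the visual features are first projected by
+# `visn_fc`, then the language layers, the relational (visual) layers, and finally the
+# cross-modality layers are run. Names and sizes are illustrative; both attention masks are left
+# as `None` for brevity.
+def _encoder_sketch():
+    config = LxmertConfig()
+    encoder = TFLxmertEncoder(config, name="encoder_sketch")
+    embedding_output = tf.random.uniform((2, 5, config.hidden_size))    # word embeddings
+    visual_feats = tf.random.uniform((2, 10, config.visual_feat_dim))   # detector features
+    visual_pos = tf.random.uniform((2, 10, 4))                          # normalized boxes
+    visual_encoder_outputs, lang_encoder_outputs, cross_attentions = encoder(
+        embedding_output,
+        None,  # lang_attention_mask
+        visual_feats,
+        visual_pos,
+        None,  # visual_attention_mask
+        output_attentions=False,
+        training=False,
+    )
+    # last hidden state of each stream
+    return visual_encoder_outputs[0][-1], lang_encoder_outputs[0][-1]
+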
+
+@keras_serializable
+class TFLxmertMainLayer(keras.layers.Layer):
+ config_class = LxmertConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.num_l_layers = config.l_layers
+ self.num_x_layers = config.x_layers
+ self.num_r_layers = config.r_layers
+ self.initializer_range = config.initializer_range
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+ self.embeddings = TFLxmertEmbeddings(config, name="embeddings")
+ self.encoder = TFLxmertEncoder(config, name="encoder")
+ self.pooler = TFLxmertPooler(config, name="pooler")
+ self.config = config
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ visual_feats=None,
+ visual_pos=None,
+ attention_mask=None,
+ visual_attention_mask=None,
+ token_type_ids=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+ if visual_pos is None or visual_feats is None:
+ raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.")
+
+ if attention_mask is None:
+ attention_mask = tf.fill(input_shape, 1)
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(input_shape, 0)
+
+ # Positional Word Embeddings
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training)
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+        # This attention mask is simpler than the triangular masking of causal attention
+        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
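+        # For example, a padding mask of [1, 1, 0] becomes an additive mask of [0.0, 0.0, -10000.0],
+        # which drives the softmax probability of the padded position to (effectively) zero.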
+
+ if visual_attention_mask is not None:
+            # Broadcast the visual mask to [batch_size, 1, 1, num_visual_features]
+            extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1)
+
+ extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype)
+ extended_visual_attention_mask = tf.multiply(
+ tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst
+ )
+ else:
+ extended_visual_attention_mask = None
+
+ # Run Lxmert encoder
+ encoder_outputs = self.encoder(
+ embedding_output,
+ extended_attention_mask,
+ visual_feats,
+ visual_pos,
+ extended_visual_attention_mask,
+ output_attentions,
+ training,
+ )
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
+ vision_hidden_states = visual_encoder_outputs[0]
+ language_hidden_states = lang_encoder_outputs[0]
+
+ all_attentions = ()
+ if output_attentions:
+ language_attentions = lang_encoder_outputs[1]
+ vision_attentions = visual_encoder_outputs[1]
+ cross_encoder_attentions = encoder_outputs[2]
+ all_attentions = (
+ language_attentions,
+ vision_attentions,
+ cross_encoder_attentions,
+ )
+
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
+
+ visual_output = vision_hidden_states[-1]
+ lang_output = language_hidden_states[-1]
+ pooled_output = self.pooler(lang_output)
+
+ if not return_dict:
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
+
+ return TFLxmertModelOutput(
+ pooled_output=pooled_output,
+ language_output=lang_output,
+ vision_output=visual_output,
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
+ language_attentions=language_attentions if output_attentions else None,
+ vision_attentions=vision_attentions if output_attentions else None,
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+
+
+class TFLxmertPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LxmertConfig
+ base_model_prefix = "lxmert"
+
+ @property
+ def dummy_inputs(self):
+ """
+ Dummy inputs to build the network.
+
+ Returns:
+            A dictionary of `tf.Tensor` dummy inputs.
+ """
+ batch_size = 2
+ num_visual_features = 10
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
+
+ return {
+ "input_ids": input_ids,
+ "visual_feats": visual_feats,
+ "visual_pos": visual_pos,
+ }
+
+ @property
+ def input_signature(self):
+ return {
+ "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
+ "visual_feats": tf.TensorSpec((None, None, self.config.visual_feat_dim), tf.float32, name="visual_feats"),
+ "visual_pos": tf.TensorSpec((None, None, 4), tf.float32, name="visual_pos"),
+ "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"),
+ "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
+ }
+
+
+LXMERT_START_DOCSTRING = r"""
+
+    The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
+    Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It is a vision-and-language transformer
+    model, pre-trained on a variety of multi-modal datasets comprising GQA, VQA v2.0, MSCOCO captions, and Visual
+    Genome, using a combination of masked language modeling, region-of-interest feature regression, and cross-entropy
+    losses for question answering, attribute prediction, and object tag prediction.
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+LXMERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+        visual_feats (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
+            This input represents visual features. They are ROI-pooled object features extracted from bounding boxes
+            using a Faster R-CNN model.
+
+            These are currently not provided by the transformers library.
+        visual_pos (`tf.Tensor` of shape `(batch_size, num_visual_features, 4)`):
+            This input represents spatial features corresponding (by index) to the visual features. The pre-trained
+            LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to 1.
+
+            These are currently not provided by the transformers library.
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+        visual_attention_mask (`tf.Tensor` of shape `(batch_size, num_visual_features)`, *optional*):
+            Mask to avoid performing attention on padding visual features. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
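+# A minimal sketch of the input formats described in the docstrings above. `model` is assumed to be
+# any instantiated LXMERT model from this file; the tensors are placeholders and the helper name is
+# illustrative. Keyword arguments, a single dict keyed by input names, and (for Keras methods such
+# as `fit()`) a list in docstring order are all accepted.
+def _input_format_sketch(model, input_ids, visual_feats, visual_pos, attention_mask):
+    # 1) keyword arguments, as with the PyTorch models
+    out_kwargs = model(
+        input_ids=input_ids, visual_feats=visual_feats, visual_pos=visual_pos, attention_mask=attention_mask
+    )
+    # 2) a single dictionary in the first positional argument
+    out_dict = model(
+        {
+            "input_ids": input_ids,
+            "visual_feats": visual_feats,
+            "visual_pos": visual_pos,
+            "attention_mask": attention_mask,
+        }
+    )
+    return out_kwargs, out_dict
+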
+
+@add_start_docstrings(
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
+ LXMERT_START_DOCSTRING,
+)
+class TFLxmertModel(TFLxmertPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFLxmertModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ visual_feats: tf.Tensor | None = None,
+ visual_pos: tf.Tensor | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ visual_attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[Tuple, TFLxmertModelOutput]:
+ outputs = self.lxmert(
+ input_ids,
+ visual_feats,
+ visual_pos,
+ attention_mask,
+ visual_attention_mask,
+ token_type_ids,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "lxmert", None) is not None:
+ with tf.name_scope(self.lxmert.name):
+ self.lxmert.build(None)
+
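+# A minimal end-to-end sketch for TFLxmertModel. The checkpoint name and the random visual inputs
+# are assumptions for illustration only; in practice the visual features come from an external
+# Faster R-CNN detector, as noted in LXMERT_INPUTS_DOCSTRING.
+def _tf_lxmert_model_sketch():
+    from transformers import LxmertTokenizer  # assumed available alongside this model
+
+    tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
+    model = TFLxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")
+
+    inputs = tokenizer("What is the dog doing?", return_tensors="tf")
+    visual_feats = tf.random.uniform((1, 10, model.config.visual_feat_dim))  # placeholder detector features
+    visual_pos = tf.random.uniform((1, 10, 4))                               # placeholder normalized boxes
+
+    outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
+    return outputs.language_output, outputs.vision_output, outputs.pooled_output
+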
+
+class TFLxmertPooler(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert
+class TFLxmertPredictionHeadTransform(keras.layers.Layer):
+ def __init__(self, config: LxmertConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="dense",
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.transform_act_fn = config.hidden_act
+
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(inputs=hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert
+class TFLxmertLMPredictionHead(keras.layers.Layer):
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.hidden_size = config.hidden_size
+
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.input_embeddings = input_embeddings
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transform", None) is not None:
+ with tf.name_scope(self.transform.name):
+ self.transform.build(None)
+
+ def get_output_embeddings(self) -> keras.layers.Layer:
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value: tf.Variable):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self) -> Dict[str, tf.Variable]:
+ return {"bias": self.bias}
+
+ def set_bias(self, value: tf.Variable):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.transform(hidden_states=hidden_states)
+ seq_length = shape_list(hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert
+class TFLxmertMLMHead(keras.layers.Layer):
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
+ super().__init__(**kwargs)
+
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
+
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
+ prediction_scores = self.predictions(hidden_states=sequence_output)
+
+ return prediction_scores
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+
+
+class TFLxmertPreTrainingHeads(keras.layers.Layer):
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
+
+ self.seq_relationship = keras.layers.Dense(
+ 2,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="seq_relationship",
+ )
+ self.config = config
+
+ def call(self, sequence_output, pooled_output):
+ prediction_scores = self.predictions(sequence_output)
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return prediction_scores, seq_relationship_score
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+ if getattr(self, "seq_relationship", None) is not None:
+ with tf.name_scope(self.seq_relationship.name):
+ self.seq_relationship.build([None, None, self.config.hidden_size])
+
+
+class TFLxmertVisualAnswerHead(keras.layers.Layer):
+ def __init__(self, config, num_labels, **kwargs):
+ super().__init__(**kwargs)
+ hid_dim = config.hidden_size
+ self.dense = keras.layers.Dense(
+ hid_dim * 2,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="logit_fc_._0",
+ )
+ self.activation = get_tf_activation("gelu")
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2")
+ self.dense_1 = keras.layers.Dense(
+ num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="logit_fc_._3",
+ )
+ self.hid_dim = hid_dim
+
+ def call(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.dense_1(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.hid_dim])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, self.hid_dim * 2])
+ if getattr(self, "dense_1", None) is not None:
+ with tf.name_scope(self.dense_1.name):
+ self.dense_1.build([None, None, self.hid_dim * 2])
+
+
+class TFLxmertVisualObjHead(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
+
+ # Decide the use of visual losses
+ visual_losses = {}
+ if config.visual_obj_loss:
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
+ if config.visual_attr_loss:
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
+ if config.visual_feat_loss:
+ visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim}
+ self.visual_losses = visual_losses
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder_dict = {
+ key: keras.layers.Dense(
+ self.visual_losses[key]["num"],
+ kernel_initializer=get_initializer(config.initializer_range),
+ name=f"decoder_dict.{key}",
+ )
+ for key in self.visual_losses
+ }
+ self.config = config
+
+ def call(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ output = {}
+ for key in self.visual_losses:
+ output[key] = self.decoder_dict[key](hidden_states)
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transform", None) is not None:
+ with tf.name_scope(self.transform.name):
+ self.transform.build(None)
+ if getattr(self, "decoder_dict", None) is not None:
+ for layer in self.decoder_dict.values():
+ with tf.name_scope(layer.name):
+ layer.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING)
+class TFLxmertForPreTraining(TFLxmertPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.config = config
+ self.num_qa_labels = config.num_qa_labels
+ self.visual_loss_normalizer = config.visual_loss_normalizer
+
+ # Use of pretraining tasks
+ self.task_mask_lm = config.task_mask_lm
+ self.task_obj_predict = config.task_obj_predict
+ self.task_matched = config.task_matched
+ self.task_qa = config.task_qa
+
+ # Lxmert backbone
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
+
+ # Pre-training heads
+ self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls")
+ if self.task_obj_predict:
+ self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head")
+ if self.task_qa:
+ self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head")
+
+ # Loss functions
+ self.loss_fcts = {
+ "l2": keras.losses.Huber(delta=1.0, name="huber_loss"),
+ "visn_ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+ "ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+ }
+
+ visual_losses = {}
+ if config.visual_obj_loss:
+ visual_losses["obj"] = {
+ "shape": (-1,),
+ "num": config.num_object_labels,
+ "loss": "visn_ce",
+ }
+ if config.visual_attr_loss:
+ visual_losses["attr"] = {
+ "shape": (-1,),
+ "num": config.num_attr_labels,
+ "loss": "visn_ce",
+ }
+ if config.visual_feat_loss:
+ visual_losses["feat"] = {
+ "shape": (-1, config.visual_feat_dim),
+ "num": config.visual_feat_dim,
+ "loss": "l2",
+ }
+ self.visual_losses = visual_losses
+
+ @property
+ def dummy_inputs(self):
+ """
+ Dummy inputs to build the network.
+
+ Returns:
+            A dictionary of `tf.Tensor` dummy inputs.
+ """
+ batch_size = 2
+ num_visual_features = 10
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
+
+ if self.config.task_obj_predict:
+ obj_labels = {}
+ if self.config.visual_attr_loss and self.config.task_obj_predict:
+ obj_labels["attr"] = (
+ tf.ones([batch_size, num_visual_features]),
+ tf.ones([batch_size, num_visual_features]),
+ )
+ if self.config.visual_feat_loss and self.config.task_obj_predict:
+ obj_labels["feat"] = (
+ tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]),
+ tf.ones([batch_size, num_visual_features]),
+ )
+ if self.config.visual_obj_loss and self.config.task_obj_predict:
+ obj_labels["obj"] = (
+ tf.ones([batch_size, num_visual_features]),
+ tf.ones([batch_size, num_visual_features]),
+ )
+
+ return {
+ **{
+ "input_ids": input_ids,
+ "visual_feats": visual_feats,
+ "visual_pos": visual_pos,
+ },
+ **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}),
+ }
+
+ def get_lm_head(self):
+ return self.cls.predictions
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ visual_feats: tf.Tensor | None = None,
+ visual_pos: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ visual_attention_mask: tf.Tensor | None = None,
+ token_type_ids: tf.Tensor | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ masked_lm_labels: tf.Tensor | None = None,
+ obj_labels: Dict[str, Tuple[tf.Tensor, tf.Tensor]] | None = None,
+ matched_label: tf.Tensor | None = None,
+ ans: tf.Tensor | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ return_dict: bool | None = None,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor] | TFLxmertForPreTrainingOutput:
+ r"""
+ masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked); the loss is only computed for tokens with labels in `[0, ..., config.vocab_size]`.
+        obj_labels (`Dict[str, Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`):
+            Each key is named after one of the visual losses, and each element of the tuple is of shape
+            `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label ids and
+            the label scores, respectively.
+ matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the text-image matching (classification) loss. The input should be a sequence
+            pair (see the `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates that the sentence does not match the image,
+ - 1 indicates that the sentence does match the image.
+        ans (`tf.Tensor` of shape `(batch_size,)`, *optional*, defaults to `None`):
+            A one-hot representation of the correct answer.
+
+ Returns:
+ """
+
+ lxmert_output = self.lxmert(
+ input_ids,
+ visual_feats,
+ visual_pos,
+ attention_mask,
+ visual_attention_mask,
+ token_type_ids,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training,
+ )
+
+ lang_output, visual_output, pooled_output = (
+ lxmert_output[0],
+ lxmert_output[1],
+ lxmert_output[2],
+ )
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
+ if self.task_qa:
+ answer_score = self.answer_head(pooled_output)
+ else:
+ answer_score = pooled_output[0][0]
+
+ total_loss = (
+ None
+ if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None)
+ else tf.constant(0.0)
+ )
+ losses = ()
+ if masked_lm_labels is not None and self.task_mask_lm:
+ masked_lm_loss = self.loss_fcts["ce"](
+ tf.reshape(masked_lm_labels, [-1]),
+ tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]),
+ )
+ total_loss += masked_lm_loss
+ losses += (masked_lm_loss,)
+ if matched_label is not None and self.task_matched:
+ matched_loss = self.loss_fcts["ce"](
+ tf.reshape(matched_label, [-1]),
+ tf.reshape(cross_relationship_score, [-1, 2]),
+ )
+ total_loss += matched_loss
+ losses += (matched_loss,)
+ if obj_labels is not None and self.task_obj_predict:
+ total_visn_loss = 0.0
+ visn_prediction_scores_dict = self.obj_predict_head(visual_output)
+ for key, key_info in self.visual_losses.items():
+ label, mask_conf = obj_labels[key]
+ output_dim = key_info["num"]
+ loss_fct_name = key_info["loss"]
+ label_shape = key_info["shape"]
+ weight = self.visual_loss_normalizer
+ visn_loss_fct = self.loss_fcts[loss_fct_name]
+ visn_prediction_scores = visn_prediction_scores_dict[key]
+ visn_loss = visn_loss_fct(
+ tf.reshape(label, label_shape),
+ tf.reshape(visn_prediction_scores, [-1, output_dim]),
+ )
+
+ if visn_loss.ndim > 1: # Regression Losses
+ visn_loss = tf.reduce_mean(visn_loss)
+ visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight
+ total_visn_loss += visn_loss
+ losses += (visn_loss,)
+ total_loss += total_visn_loss
+ if ans is not None and self.task_qa:
+ answer_loss = self.loss_fcts["ce"](
+ tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels])
+ )
+ # exclude "*2" here to match the effect of QA losses.
+ # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. (Used 10 instead of 6 in EMNLP paper)
+ # Now : (loss *1) for 12 epochs
+ #
+ # * 2 # Multiply by 2 because > half of the data will not have label
+ total_loss += answer_loss
+ losses += (answer_loss,)
+ # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach()
+
+ if not return_dict:
+ output = (
+ lang_prediction_scores,
+ cross_relationship_score,
+ answer_score,
+ ) + lxmert_output[3:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return TFLxmertForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=lang_prediction_scores,
+ cross_relationship_score=cross_relationship_score,
+ question_answering_score=answer_score,
+ language_hidden_states=lxmert_output.language_hidden_states,
+ vision_hidden_states=lxmert_output.vision_hidden_states,
+ language_attentions=lxmert_output.language_attentions,
+ vision_attentions=lxmert_output.vision_attentions,
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "lxmert", None) is not None:
+ with tf.name_scope(self.lxmert.name):
+ self.lxmert.build(None)
+ if getattr(self, "cls", None) is not None:
+ with tf.name_scope(self.cls.name):
+ self.cls.build(None)
+ if getattr(self, "obj_predict_head", None) is not None:
+ with tf.name_scope(self.obj_predict_head.name):
+ self.obj_predict_head.build(None)
+ if getattr(self, "answer_head", None) is not None:
+ with tf.name_scope(self.answer_head.name):
+ self.answer_head.build(None)
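+
+
+# A minimal sketch of the multi-task pre-training head above, showing only the masked-LM and
+# text-image matching losses. The token ids and label tensors are placeholders; `obj_labels` and
+# `ans` are omitted for brevity, so the object-prediction and QA terms are simply skipped.
+def _pretraining_loss_sketch():
+    config = LxmertConfig()
+    model = TFLxmertForPreTraining(config)
+
+    input_ids = tf.constant([[101, 2054, 2003, 1996, 3899, 102]], dtype=tf.int32)  # placeholder ids
+    visual_feats = tf.random.uniform((1, 10, config.visual_feat_dim))
+    visual_pos = tf.random.uniform((1, 10, 4))
+    masked_lm_labels = tf.constant([[2054, 2003, 1996, 3899, 2001, 1012]], dtype=tf.int32)  # placeholder labels
+    matched_label = tf.constant([1], dtype=tf.int32)  # 1 = the text matches the image
+
+    outputs = model(
+        input_ids=input_ids,
+        visual_feats=visual_feats,
+        visual_pos=visual_pos,
+        masked_lm_labels=masked_lm_labels,
+        matched_label=matched_label,
+        return_dict=True,
+    )
+    return outputs.loss  # sum of the masked-LM and matching losses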
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d2fca9328ddc4b658760e5597d766d4b885c3b7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py
@@ -0,0 +1,503 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+# Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, BertTokenizer->LxmertTokenizer
+class LxmertTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a Lxmert tokenizer. Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original Lxmert).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = LxmertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ def vocab_size(self):
+ return len(self.vocab)
+
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ def _tokenize(self, text, split_special_tokens=False):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
+ ):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. A Lxmert sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
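
The same toy IDs make the segment layout concrete: everything up to and including the first `[SEP]` gets type 0, the second sequence and its `[SEP]` get type 1 (a sketch of the arithmetic, not a test of this class):

```python
token_ids_0 = [7592, 2088]        # hypothetical IDs for sentence A
token_ids_1 = [2129, 2024, 2017]  # hypothetical IDs for sentence B

# [CLS] A [SEP]  -> segment 0
# B [SEP]        -> segment 1
token_type_ids = (len(token_ids_0) + 2) * [0] + (len(token_ids_1) + 1) * [1]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1, 1]
```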
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*):
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+ For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
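
A minimal sketch of the greedy longest-match-first behaviour described in the docstring, using a toy vocabulary (the vocabulary entries below are illustrative, not from a real checkpoint):

```python
from transformers.models.lxmert.tokenization_lxmert import WordpieceTokenizer

toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
wordpiece = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

print(wordpiece.tokenize("unaffable"))  # ['un', '##aff', '##able']
print(wordpiece.tokenize("xyz"))        # ['[UNK]'] -- no sub-piece of "xyz" is in the toy vocab
```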
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..e31fdbcf761d50b20615c91b5587279c5fdd266e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py
@@ -0,0 +1,169 @@
+# coding=utf-8
+# Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from .tokenization_lxmert import LxmertTokenizer
+
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+
+# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, Bert->Lxmert
+class LxmertTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" Lxmert tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
+ whitespaces by the classic one.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original Lxmert).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = LxmertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A Lxmert sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
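
A hedged usage sketch for the fast tokenizer defined above; it loads the checkpoint named in the "Copied from" comment, so it needs network access (or a local cache) and the `tokenizers` backend:

```python
from transformers import LxmertTokenizerFast

tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
encoding = tokenizer("How many cats are there?", "There are two cats.")

print(encoding["input_ids"])       # [CLS] question [SEP] answer [SEP], as WordPiece IDs
print(encoding["token_type_ids"])  # 0s for the first segment, 1s for the second
```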
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f24f580ffae5962909b60a0832054db2f0fc41c1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/configuration_squeezebert.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/configuration_squeezebert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bb91c1b83bfb1d0189b09eec9f4167459861b4a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/configuration_squeezebert.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..880b4f0c828d09d1c1671a94760dd2ff2349694c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert_fast.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd3aaba2c9e23dbbda188be500c0165fec8e3273
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/squeezebert/__pycache__/tokenization_squeezebert_fast.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d5afba10dacfcdd5691c42b4d56b0aeed92d78b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__init__.py
@@ -0,0 +1,85 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {"configuration_vilt": ["VILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViltConfig"]}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["feature_extraction_vilt"] = ["ViltFeatureExtractor"]
+ _import_structure["image_processing_vilt"] = ["ViltImageProcessor"]
+ _import_structure["processing_vilt"] = ["ViltProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_vilt"] = [
+ "VILT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ViltForImageAndTextRetrieval",
+ "ViltForImagesAndTextClassification",
+ "ViltForTokenClassification",
+ "ViltForMaskedLM",
+ "ViltForQuestionAnswering",
+ "ViltLayer",
+ "ViltModel",
+ "ViltPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_vilt import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViltConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .feature_extraction_vilt import ViltFeatureExtractor
+ from .image_processing_vilt import ViltImageProcessor
+ from .processing_vilt import ViltProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_vilt import (
+ VILT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ViltForImageAndTextRetrieval,
+ ViltForImagesAndTextClassification,
+ ViltForMaskedLM,
+ ViltForQuestionAnswering,
+ ViltForTokenClassification,
+ ViltLayer,
+ ViltModel,
+ ViltPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
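
A sketch of what the lazy-module pattern above provides: importing the package is cheap, and the heavier symbols are only registered when their optional dependencies (torch, vision) are installed:

```python
import transformers.models.vilt as vilt

# The configuration is pure Python and is always importable.
config_cls = vilt.ViltConfig
print(config_cls.model_type)  # "vilt"

# ViltModel / ViltImageProcessor are only registered when torch / vision are
# available; otherwise attribute access fails instead of importing anything eagerly.
print(hasattr(vilt, "ViltModel"))  # True in an environment with torch installed
```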
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01778f964e3538ae3006e8f9a63f5ee42fdb4865
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/configuration_vilt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/configuration_vilt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4df6d367b7d7e1a51d73439f95ed427f3d0fae5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/configuration_vilt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/convert_vilt_original_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/convert_vilt_original_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c5b3981cdb4121d044b2bda9d309d7e030d613a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/convert_vilt_original_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/feature_extraction_vilt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/feature_extraction_vilt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1d1303ffb9086684b93b82458f697882bfa0e9b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/feature_extraction_vilt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/image_processing_vilt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/image_processing_vilt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1966b12d3648f9edcc4fdc91102feee4110083f6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/image_processing_vilt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/modeling_vilt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/modeling_vilt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01a9c3c62583b4782fd644fa2a6726d49baffaef
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/modeling_vilt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/processing_vilt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/processing_vilt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a7773957730118389718900b4a5465dcb3d86c8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/__pycache__/processing_vilt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/configuration_vilt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/configuration_vilt.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ad4bde69494d77b9d43c0f8f2480d2be24a3d6a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/configuration_vilt.py
@@ -0,0 +1,147 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" VilT model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class ViltConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ViltModel`]. It is used to instantiate a ViLT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the ViLT
+ [dandelin/vilt-b32-mlm](https://huggingface.co/dandelin/vilt-b32-mlm) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the text part of the model. Defines the number of different tokens that can be
+ represented by the `inputs_ids` passed when calling [`ViltModel`].
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`ViltModel`]. This is used when encoding
+ text.
+ modality_type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the modalities passed when calling [`ViltModel`]. This is used after concatenating the
+ embeddings of the text and image modalities.
+ max_position_embeddings (`int`, *optional*, defaults to 40):
+ The maximum sequence length that this model might ever be used with.
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ image_size (`int`, *optional*, defaults to 384):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 32):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ max_image_length (`int`, *optional*, defaults to -1):
+ The maximum number of patches to take as input for the Transformer encoder. If set to a positive integer,
+ the encoder will sample `max_image_length` patches at maximum. If set to -1, will not be taken into
+ account.
+ num_images (`int`, *optional*, defaults to -1):
+ The number of images to use for natural language visual reasoning. If set to a positive integer, will be
+ used by [`ViltForImagesAndTextClassification`] for defining the classifier head.
+
+ Example:
+
+ ```python
+ >>> from transformers import ViltModel, ViltConfig
+
+ >>> # Initializing a ViLT dandelin/vilt-b32-mlm style configuration
+ >>> configuration = ViltConfig()
+
+ >>> # Initializing a model from the dandelin/vilt-b32-mlm style configuration
+ >>> model = ViltModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "vilt"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ type_vocab_size=2,
+ modality_type_vocab_size=2,
+ max_position_embeddings=40,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ image_size=384,
+ patch_size=32,
+ num_channels=3,
+ qkv_bias=True,
+ max_image_length=-1,
+ tie_word_embeddings=False,
+ num_images=-1,
+ **kwargs,
+ ):
+ super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.type_vocab_size = type_vocab_size
+ self.modality_type_vocab_size = modality_type_vocab_size
+ self.max_position_embeddings = max_position_embeddings
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.qkv_bias = qkv_bias
+ self.max_image_length = max_image_length
+ self.num_images = num_images
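
A worked example from the defaults above, assuming the usual ViT-style patch embedding (the patch-grid arithmetic is an illustration of the defaults, not code from this file):

```python
from transformers import ViltConfig

config = ViltConfig()
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)                     # (384 // 32) ** 2 = 144 image patches
print(config.max_position_embeddings)  # 40 -- text positions, much shorter than BERT's 512
```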
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/convert_vilt_original_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/convert_vilt_original_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..e597d0d7e778b7e0fff61e5c1eec83996170b2e1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/convert_vilt_original_to_pytorch.py
@@ -0,0 +1,300 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert ViLT checkpoints from the original Github repository."""
+
+
+import argparse
+import json
+from pathlib import Path
+
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import (
+ BertTokenizer,
+ ViltConfig,
+ ViltForImageAndTextRetrieval,
+ ViltForImagesAndTextClassification,
+ ViltForMaskedLM,
+ ViltForQuestionAnswering,
+ ViltImageProcessor,
+ ViltProcessor,
+)
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
+ rename_keys = []
+ for i in range(config.num_hidden_layers):
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
+ rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
+ rename_keys.append(
+ (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
+ )
+ rename_keys.append(
+ (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
+ )
+ rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
+ rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
+ rename_keys.append(
+ (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
+ )
+ rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
+ rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
+ rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))
+
+ # embeddings
+ rename_keys.extend(
+ [
+ # text embeddings
+ ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
+ (
+ "text_embeddings.position_embeddings.weight",
+ "vilt.embeddings.text_embeddings.position_embeddings.weight",
+ ),
+ ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
+ (
+ "text_embeddings.token_type_embeddings.weight",
+ "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
+ ),
+ ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
+ ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
+ # patch embeddings
+ ("transformer.cls_token", "vilt.embeddings.cls_token"),
+ ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
+ ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
+ ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
+ # token type embeddings
+ ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
+ ]
+ )
+
+ # final layernorm + pooler
+ rename_keys.extend(
+ [
+ ("transformer.norm.weight", "vilt.layernorm.weight"),
+ ("transformer.norm.bias", "vilt.layernorm.bias"),
+ ("pooler.dense.weight", "vilt.pooler.dense.weight"),
+ ("pooler.dense.bias", "vilt.pooler.dense.bias"),
+ ]
+ )
+
+ # classifier head(s)
+ if vqa_model:
+ # classification head
+ rename_keys.extend(
+ [
+ ("vqa_classifier.0.weight", "classifier.0.weight"),
+ ("vqa_classifier.0.bias", "classifier.0.bias"),
+ ("vqa_classifier.1.weight", "classifier.1.weight"),
+ ("vqa_classifier.1.bias", "classifier.1.bias"),
+ ("vqa_classifier.3.weight", "classifier.3.weight"),
+ ("vqa_classifier.3.bias", "classifier.3.bias"),
+ ]
+ )
+ elif nlvr_model:
+ # classification head
+ rename_keys.extend(
+ [
+ ("nlvr2_classifier.0.weight", "classifier.0.weight"),
+ ("nlvr2_classifier.0.bias", "classifier.0.bias"),
+ ("nlvr2_classifier.1.weight", "classifier.1.weight"),
+ ("nlvr2_classifier.1.bias", "classifier.1.bias"),
+ ("nlvr2_classifier.3.weight", "classifier.3.weight"),
+ ("nlvr2_classifier.3.bias", "classifier.3.bias"),
+ ]
+ )
+ else:
+ pass
+
+ return rename_keys
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config):
+ for i in range(config.num_hidden_layers):
+ prefix = "vilt."
+ # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
+ in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
+ in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+ : config.hidden_size, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+ config.hidden_size : config.hidden_size * 2, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
+ config.hidden_size : config.hidden_size * 2
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+ -config.hidden_size :, :
+ ]
+ state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
+
+
+def remove_classification_head_(state_dict):
+ ignore_keys = ["head.weight", "head.bias"]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+@torch.no_grad()
+def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
+ """
+ Copy/paste/tweak model's weights to our ViLT structure.
+ """
+
+ # define configuration and initialize HuggingFace model
+ config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
+ mlm_model = False
+ vqa_model = False
+ nlvr_model = False
+ irtr_model = False
+ if "vqa" in checkpoint_url:
+ vqa_model = True
+ config.num_labels = 3129
+ repo_id = "huggingface/label-files"
+ filename = "vqa2-id2label.json"
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ model = ViltForQuestionAnswering(config)
+ elif "nlvr" in checkpoint_url:
+ nlvr_model = True
+ config.num_labels = 2
+ config.id2label = {0: "False", 1: "True"}
+ config.label2id = {v: k for k, v in config.id2label.items()}
+ config.modality_type_vocab_size = 3
+ model = ViltForImagesAndTextClassification(config)
+ elif "irtr" in checkpoint_url:
+ irtr_model = True
+ model = ViltForImageAndTextRetrieval(config)
+ elif "mlm_itm" in checkpoint_url:
+ mlm_model = True
+ model = ViltForMaskedLM(config)
+ else:
+ raise ValueError("Unknown model type")
+
+ # load state_dict of original model, remove and rename some keys
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
+ rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_q_k_v(state_dict, config)
+ if mlm_model or irtr_model:
+ ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+ # load state dict into HuggingFace model
+ model.eval()
+ if mlm_model:
+ missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
+ assert missing_keys == ["mlm_score.decoder.bias"]
+ else:
+ model.load_state_dict(state_dict)
+
+ # Define processor
+ image_processor = ViltImageProcessor(size=384)
+ tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
+ processor = ViltProcessor(image_processor, tokenizer)
+
+ # Forward pass on example inputs (image + text)
+ if nlvr_model:
+ image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
+ image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
+ text = (
+ "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
+ " standing."
+ )
+ encoding_1 = processor(image1, text, return_tensors="pt")
+ encoding_2 = processor(image2, text, return_tensors="pt")
+ outputs = model(
+ input_ids=encoding_1.input_ids,
+ pixel_values=encoding_1.pixel_values,
+ pixel_values_2=encoding_2.pixel_values,
+ )
+ else:
+ image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
+ if mlm_model:
+ text = "a bunch of [MASK] laying on a [MASK]."
+ else:
+ text = "How many cats are there?"
+ encoding = processor(image, text, return_tensors="pt")
+ outputs = model(**encoding)
+
+ # Verify outputs
+ if mlm_model:
+ expected_shape = torch.Size([1, 11, 30522])
+ expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
+ assert outputs.logits.shape == expected_shape
+ assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
+
+ # verify masked token prediction equals "cats"
+ predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
+ assert tokenizer.decode([predicted_id]) == "cats"
+ elif vqa_model:
+ expected_shape = torch.Size([1, 3129])
+ expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
+ assert outputs.logits.shape == expected_shape
+ assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
+
+ # verify vqa prediction equals "2"
+ predicted_idx = outputs.logits.argmax(-1).item()
+ assert model.config.id2label[predicted_idx] == "2"
+ elif nlvr_model:
+ expected_shape = torch.Size([1, 2])
+ expected_slice = torch.tensor([-2.8721, 2.1291])
+ assert torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4)
+ assert outputs.logits.shape == expected_shape
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model and processor to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
+ type=str,
+ help="URL of the checkpoint you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+
+ args = parser.parse_args()
+ convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/feature_extraction_vilt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/feature_extraction_vilt.py
new file mode 100644
index 0000000000000000000000000000000000000000..5091946bf94334dae16408346e707cf2fcaffaa4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/feature_extraction_vilt.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for ViLT."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_vilt import ViltImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class ViltFeatureExtractor(ViltImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class ViltFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
+ " use ViltImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/image_processing_vilt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/image_processing_vilt.py
new file mode 100644
index 0000000000000000000000000000000000000000..42e5b3f439d6aab9d9d9fc1349df6f0cf947f28c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/image_processing_vilt.py
@@ -0,0 +1,505 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Vilt."""
+
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import PaddingMode, pad, resize, to_channel_dimension_format
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+def max_across_indices(values: Iterable[Any]) -> List[Any]:
+ """
+ Return the maximum value across all indices of an iterable of values.
+ """
+ return [max(values_i) for values_i in zip(*values)]
+
+
+def make_pixel_mask(
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
+) -> np.ndarray:
+ """
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
+
+ Args:
+ image (`np.ndarray`):
+ Image to make the pixel mask for.
+ output_size (`Tuple[int, int]`):
+ Output size of the mask.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ mask = np.zeros(output_size, dtype=np.int64)
+ mask[:input_height, :input_width] = 1
+ return mask
+
+
+def get_max_height_width(
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
+) -> Tuple[int, int]:
+ """
+ Get the maximum height and width across all images in a batch.
+ """
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if input_data_format == ChannelDimension.FIRST:
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
+ elif input_data_format == ChannelDimension.LAST:
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
+ else:
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
+ return (max_height, max_width)
+
+
+def get_resize_output_image_size(
+ input_image: np.ndarray,
+ shorter: int = 800,
+ longer: int = 1333,
+ size_divisor: int = 32,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+) -> Tuple[int, int]:
+ input_height, input_width = get_image_size(input_image, input_data_format)
+ min_size, max_size = shorter, longer
+
+ scale = min_size / min(input_height, input_width)
+
+ if input_height < input_width:
+ new_height = min_size
+ new_width = scale * input_width
+ else:
+ new_height = scale * input_height
+ new_width = min_size
+
+ if max(new_height, new_width) > max_size:
+ scale = max_size / max(new_height, new_width)
+ new_height = scale * new_height
+ new_width = scale * new_width
+
+ new_height, new_width = int(new_height + 0.5), int(new_width + 0.5)
+ new_height = new_height // size_divisor * size_divisor
+ new_width = new_width // size_divisor * size_divisor
+
+ return new_height, new_width
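
A worked example of the resizing rule above: a 480x640 (height x width) image with `shorter=384`, `longer=1333` and `size_divisor=32` is scaled by 384 / 480 = 0.8 to 384x512; 512 is below the 1333 cap and both sides are already multiples of 32:

```python
import numpy as np

from transformers.models.vilt.image_processing_vilt import get_resize_output_image_size

dummy = np.zeros((3, 480, 640), dtype=np.uint8)  # channels-first dummy image
print(get_resize_output_image_size(dummy, shorter=384, longer=1333, size_divisor=32))
# (384, 512)
```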
+
+
+class ViltImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a ViLT image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 384}`):
+ Resize the shorter side of the input to `size["shortest_edge"]`. The longer side will be limited to under
+ `int((1333 / 800) * size["shortest_edge"])` while preserving the aspect ratio. Only has an effect if
+ `do_resize` is set to `True`. Can be overridden by the `size` parameter in the `preprocess` method.
+ size_divisor (`int`, *optional*, defaults to 32):
+ The size by which to make sure both the height and width can be divided. Only has an effect if `do_resize`
+ is set to `True`. Can be overridden by the `size_divisor` parameter in the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
+ overridden by the `resample` parameter in the `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
+ `do_rescale` parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
+ overridden by the `rescale_factor` parameter in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Whether to pad the image to the `(max_height, max_width)` of the images in the batch. Can be overridden by
+ the `do_pad` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ size_divisor: int = 32,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: bool = True,
+ **kwargs,
+ ) -> None:
+ if "pad_and_return_pixel_mask" in kwargs:
+ do_pad = kwargs.pop("pad_and_return_pixel_mask")
+
+ super().__init__(**kwargs)
+ size = size if size is not None else {"shortest_edge": 384}
+ size = get_size_dict(size, default_to_square=False)
+
+ self.do_resize = do_resize
+ self.size = size
+ self.size_divisor = size_divisor
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self.do_pad = do_pad
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "size_divisor",
+ "resample",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "do_pad",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ @classmethod
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
+ """
+ Overrides the `from_dict` method from the base class to make sure `pad_and_return_pixel_mask` is updated if image processor
+ is created using from_dict and kwargs e.g. `ViltImageProcessor.from_pretrained(checkpoint,
+ pad_and_return_pixel_mask=False)`
+ """
+ image_processor_dict = image_processor_dict.copy()
+ if "pad_and_return_pixel_mask" in kwargs:
+ image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
+ return super().from_dict(image_processor_dict, **kwargs)
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ size_divisor: int = 32,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image.
+
+ Resizes the shorter side of the image to `size["shortest_edge"]` while preserving the aspect ratio. If the
+ longer side is larger than the max size `(int(`size["shortest_edge"]` * 1333 / 800))`, the longer side is then
+ resized to the max size while preserving the aspect ratio.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Controls the size of the output image. Should be of the form `{"shortest_edge": int}`.
+ size_divisor (`int`, defaults to 32):
+ The image is resized to a size that is a multiple of this value.
+ resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ size = get_size_dict(size, default_to_square=False)
+ if "shortest_edge" not in size:
+ raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
+ shorter = size["shortest_edge"]
+ longer = int(1333 / 800 * shorter)
+ output_size = get_resize_output_image_size(
+ image, shorter=shorter, longer=longer, size_divisor=size_divisor, input_data_format=input_data_format
+ )
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def _pad_image(
+ self,
+ image: np.ndarray,
+ output_size: Tuple[int, int],
+ constant_values: Union[float, Iterable[float]] = 0,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Pad an image with zeros to the given size.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ output_height, output_width = output_size
+
+ pad_bottom = output_height - input_height
+ pad_right = output_width - input_width
+ padding = ((0, pad_bottom), (0, pad_right))
+ padded_image = pad(
+ image,
+ padding,
+ mode=PaddingMode.CONSTANT,
+ constant_values=constant_values,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ return padded_image
+
+ def pad(
+ self,
+ images: List[np.ndarray],
+ constant_values: Union[float, Iterable[float]] = 0,
+ return_pixel_mask: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> BatchFeature:
+ """
+ Pads a batch of images with zeros on the bottom and right, up to the size of the largest height and width
+ in the batch, and optionally returns their corresponding pixel mask.
+
+ Args:
+ images (`List[np.ndarray]`):
+ Batch of images to pad.
+ constant_values (`float` or `Iterable[float]`, *optional*):
+ The value to use for the padding if `mode` is `"constant"`.
+ return_pixel_mask (`bool`, *optional*, defaults to `True`):
+ Whether to return a pixel mask.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ pad_size = get_max_height_width(images, input_data_format=input_data_format)
+
+ padded_images = [
+ self._pad_image(
+ image,
+ pad_size,
+ constant_values=constant_values,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for image in images
+ ]
+ data = {"pixel_values": padded_images}
+
+ if return_pixel_mask:
+ masks = [
+ make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
+ for image in images
+ ]
+ data["pixel_mask"] = masks
+
+ return BatchFeature(data=data, tensor_type=return_tensors)
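+
+ # Shape sketch (illustrative, assuming a batch of two images of shapes (3, 480, 600) and (3, 500, 560)):
+ # the batch is padded to (2, 3, 500, 600), and each `pixel_mask` entry is 1 over the original image area
+ # and 0 over the padded bottom/right region.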
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ size_divisor: Optional[int] = None,
+ resample: PILImageResampling = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_pad: Optional[bool] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single image or a batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Controls the size of the image after `resize`. The shortest edge of the image is resized to
+ `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
+ is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
+ edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
+ size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
+ The image is resized to a size that is a multiple of this value.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values between [0 - 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to normalize the image by if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
+ Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also
+ created and returned.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size_divisor = size_divisor if size_divisor is not None else self.size_divisor
+ resample = resample if resample is not None else self.resample
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_pad = do_pad if do_pad is not None else self.do_pad
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ # The pad() method pads to the maximum (height, width) in the batch and takes no extra size argument,
+ # so `do_pad` does not need to be checked by validate_preprocess_arguments().
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(
+ image=image,
+ size=size,
+ size_divisor=size_divisor,
+ resample=resample,
+ input_data_format=input_data_format,
+ )
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ if do_pad:
+ encoded_outputs = self.pad(
+ images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format
+ )
+ else:
+ encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
+
+ return encoded_outputs
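+
+
+# Illustrative end-to-end usage (a minimal sketch; the checkpoint and image URL are the ones used in the
+# model docstrings and are assumptions for this example):
+#
+# from PIL import Image
+# import requests
+# from transformers import ViltImageProcessor
+#
+# url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+# image = Image.open(requests.get(url, stream=True).raw)
+# image_processor = ViltImageProcessor.from_pretrained("dandelin/vilt-b32-mlm")
+# encoding = image_processor(images=image, return_tensors="pt")
+# # `encoding` holds "pixel_values" and, when padding is enabled, a "pixel_mask" as well.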
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/modeling_vilt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/modeling_vilt.py
new file mode 100644
index 0000000000000000000000000000000000000000..5545b881bd670a724041814ddc85a225311c00ad
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/modeling_vilt.py
@@ -0,0 +1,1488 @@
+# coding=utf-8
+# Copyright 2022 NAVER AI Labs and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch ViLT model."""
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPooling,
+ MaskedLMOutput,
+ ModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import (
+ find_pruneable_heads_and_indices,
+ meshgrid,
+ prune_linear_layer,
+)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_vilt import ViltConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "ViltConfig"
+_CHECKPOINT_FOR_DOC = "dandelin/vilt-b32-mlm"
+
+
+from ..deprecated._archive_maps import VILT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class ViltForImagesAndTextClassificationOutput(ModelOutput):
+ """
+ Class for outputs of [`ViltForImagesAndTextClassification`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`List[tuple(torch.FloatTensor)]`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ List of tuples of `torch.FloatTensor` (one for each image-text pair, each tuple containing the output of
+ the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`List[tuple(torch.FloatTensor)]`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ List of tuples of `torch.FloatTensor` (one for each image-text pair, each tuple containing the attention
+ weights of shape `(batch_size, num_heads, sequence_length, sequence_length)`). Attention weights after the
+ attention softmax, used to compute the weighted average in the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[List[Tuple[torch.FloatTensor]]] = None
+ attentions: Optional[List[Tuple[torch.FloatTensor]]] = None
+
+
+class ViltEmbeddings(nn.Module):
+ """
+ Construct the text and patch embeddings.
+
+ Text embeddings are equivalent to BERT embeddings.
+
+ Patch embeddings are equivalent to ViT embeddings.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ # text embeddings
+ self.text_embeddings = TextEmbeddings(config)
+ # patch embeddings
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ self.patch_embeddings = ViltPatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
+ # modality type (text/patch) embeddings
+ self.token_type_embeddings = nn.Embedding(config.modality_type_vocab_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def visual_embed(self, pixel_values, pixel_mask, max_image_length=200):
+ _, _, ph, pw = self.patch_embeddings.projection.weight.shape
+
+ x = self.patch_embeddings(pixel_values)
+ x_mask = pixel_mask[:, None, :, :].float()
+ x_mask = nn.functional.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
+ x_h = x_mask[:, 0].sum(dim=1)[:, 0]
+ x_w = x_mask[:, 0].sum(dim=2)[:, 0]
+
+ batch_size, num_channels, height, width = x.shape
+ patch_dim = self.config.image_size // self.config.patch_size
+ spatial_pos = self.position_embeddings[:, 1:, :].transpose(1, 2).view(1, num_channels, patch_dim, patch_dim)
+ pos_embed = torch.cat(
+ [
+ nn.functional.pad(
+ nn.functional.interpolate(
+ spatial_pos,
+ size=(h, w),
+ mode="bilinear",
+ align_corners=True,
+ ),
+ (0, width - w, 0, height - h),
+ )
+ for h, w in zip(x_h, x_w)
+ ],
+ dim=0,
+ )
+
+ pos_embed = pos_embed.flatten(2).transpose(1, 2)
+ x = x.flatten(2).transpose(1, 2)
+ # Set `device` here, otherwise `patch_index` will always be on `CPU` and will fail near the end for torch>=1.13
+ patch_index = torch.stack(
+ meshgrid(torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1]), indexing="ij"), dim=-1
+ ).to(device=x_mask.device)
+ patch_index = patch_index[None, None, :, :, :]
+ patch_index = patch_index.expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
+ patch_index = patch_index.flatten(1, 3)
+ x_mask = x_mask.flatten(1)
+
+ if max_image_length < 0 or max_image_length is None or not isinstance(max_image_length, int):
+ # Suppose the augmented resolution is 800 x 1333; the maximum effective resolution is then 800 x 1333
+ # (if one side gets bigger, the other is constrained and shrunk).
+ # (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches a single image can get.
+ # if self.patch_size = 32, 25 * 41 = 1025
+ # if res is 384 x 640, 12 * 20 = 240
+ effective_resolution = x_h * x_w
+ max_image_length = effective_resolution.max()
+ else:
+ effective_resolution = x_h * x_w
+ max_image_length = min(effective_resolution.max(), max_image_length)
+
+ valid_idx = x_mask.nonzero(as_tuple=False)
+ non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
+ unique_rows = valid_idx[:, 0].unique()
+ valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
+ non_valid_row_idx = [non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows]
+
+ valid_nums = [v.size(0) for v in valid_row_idx]
+ non_valid_nums = [v.size(0) for v in non_valid_row_idx]
+ pad_nums = [max_image_length - v for v in valid_nums]
+
+ select = []
+ for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
+ if p <= 0:
+ valid_choice = torch.multinomial(torch.ones(v).float(), max_image_length)
+ select.append(valid_row_idx[i][valid_choice])
+ else:
+ pad_choice = torch.multinomial(torch.ones(nv).float(), p, replacement=True)
+ select.append(torch.cat([valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0))
+
+ select = torch.cat(select, dim=0)
+ x = x[select[:, 0], select[:, 1]].view(batch_size, -1, num_channels)
+ x_mask = x_mask[select[:, 0], select[:, 1]].view(batch_size, -1)
+ # `patch_index` should be on the same device as `select` (for torch>=1.13), which is ensured at definition time.
+ patch_index = patch_index[select[:, 0], select[:, 1]].view(batch_size, -1, 2)
+ pos_embed = pos_embed[select[:, 0], select[:, 1]].view(batch_size, -1, num_channels)
+
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+ x = torch.cat((cls_tokens, x), dim=1)
+ pos_embed = torch.cat(
+ (self.position_embeddings[:, 0, :][:, None, :].expand(batch_size, -1, -1), pos_embed), dim=1
+ )
+ x = x + pos_embed
+ x = self.dropout(x)
+
+ x_mask = torch.cat([torch.ones(x_mask.shape[0], 1).to(x_mask), x_mask], dim=1)
+
+ return x, x_mask, (patch_index, (height, width))
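+
+ # Shape sketch (illustrative, assuming a 384 x 576 padded input and patch_size=32): the patch grid is
+ # 12 x 18 = 216 tokens; `max_image_length` caps (by sampling) or pads the number of patch tokens kept per
+ # image, a CLS token is prepended, and the returned mask has shape (batch_size, num_kept_patches + 1).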
+
+ def forward(
+ self,
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ pixel_values,
+ pixel_mask,
+ inputs_embeds,
+ image_embeds,
+ image_token_type_idx=1,
+ ):
+ # PART 1: text embeddings
+ text_embeds = self.text_embeddings(
+ input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
+ )
+
+ # PART 2: patch embeddings (with interpolated position encodings)
+ if image_embeds is None:
+ image_embeds, image_masks, patch_index = self.visual_embed(
+ pixel_values, pixel_mask, max_image_length=self.config.max_image_length
+ )
+ else:
+ image_masks = pixel_mask.flatten(1)
+
+ # PART 3: add modality type embeddings
+ # 0 indicates text, 1 indicates image, 2 is optionally used when a second image is provided (NLVR2)
+ if image_token_type_idx is None:
+ image_token_type_idx = 1
+ text_embeds = text_embeds + self.token_type_embeddings(
+ torch.zeros_like(attention_mask, dtype=torch.long, device=text_embeds.device)
+ )
+ image_embeds = image_embeds + self.token_type_embeddings(
+ torch.full_like(image_masks, image_token_type_idx, dtype=torch.long, device=text_embeds.device)
+ )
+
+ # PART 4: concatenate
+ embeddings = torch.cat([text_embeds, image_embeds], dim=1)
+ masks = torch.cat([attention_mask, image_masks], dim=1)
+
+ return embeddings, masks
+
+
+class TextEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
+ )
+
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ # Set token_type_ids to the registered buffer from the constructor, which is all zeros. This usually happens
+ # when token_type_ids are auto-generated; the registered buffer helps users trace the model without passing
+ # token_type_ids (solves issue #5664).
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class ViltPatchEmbeddings(nn.Module):
+ """
+ Image to Patch Embedding.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values):
+ batch_size, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ target_dtype = self.projection.weight.dtype
+ x = self.projection(pixel_values.to(dtype=target_dtype))
+ return x
+
+
+class ViltSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the ViltModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Vilt
+class ViltSelfOutput(nn.Module):
+ """
+ The residual connection is defined in ViltLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: ViltConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class ViltAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = ViltSelfAttention(config)
+ self.output = ViltSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
+ self_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->Vilt
+class ViltIntermediate(nn.Module):
+ def __init__(self, config: ViltConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->Vilt
+class ViltOutput(nn.Module):
+ def __init__(self, config: ViltConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ hidden_states = hidden_states + input_tensor
+
+ return hidden_states
+
+
+class ViltLayer(nn.Module):
+ """This corresponds to the Block class in the timm implementation."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = ViltAttention(config)
+ self.intermediate = ViltIntermediate(config)
+ self.output = ViltOutput(config)
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
+ self_attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), # in ViLT, layernorm is applied before self-attention
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection
+ hidden_states = attention_output + hidden_states.to(attention_output.device)
+
+ # in ViLT, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+
+ # second residual connection is done here
+ layer_output = self.output(layer_output, hidden_states)
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
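+
+ # Note on the block structure above: ViLT uses pre-LayerNorm blocks, i.e. layernorm_before -> self-attention
+ # -> residual add, then layernorm_after -> intermediate/output MLP -> a second residual add inside ViltOutput.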
+
+
+class ViltEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([ViltLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class ViltPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ViltConfig
+ base_model_prefix = "vilt"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["ViltEmbeddings", "ViltSelfAttention"]
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+VILT_START_DOCSTRING = r"""
+ This model is a PyTorch [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ViltConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+VILT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
+ IDs?](../glossary#input-ids)
+
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ [What are attention masks?](../glossary#attention-mask)
+
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+ [What are token type IDs?](../glossary#token-type-ids)
+
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`ViltImageProcessor.__call__`] for details.
+
+ pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
+ Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
+
+ - 1 for pixels that are real (i.e. **not masked**),
+ - 0 for pixels that are padding (i.e. **masked**).
+ [What are attention masks?](../glossary#attention-mask)
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
+ Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
+ IDs?](../glossary#input-ids)
+
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ [What are attention masks?](../glossary#attention-mask)
+
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+ [What are token type IDs?](../glossary#token-type-ids)
+
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`ViltImageProcessor.__call__`] for details.
+
+ pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*):
+ Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
+
+ - 1 for pixels that are real (i.e. **not masked**),
+ - 0 for pixels that are padding (i.e. **masked**).
+ [What are attention masks?](../glossary#attention-mask)
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, num_images, num_patches, hidden_size)`, *optional*):
+ Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.",
+ VILT_START_DOCSTRING,
+)
+class ViltModel(ViltPreTrainedModel):
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = ViltEmbeddings(config)
+ self.encoder = ViltEncoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.pooler = ViltPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.text_embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.text_embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ pixel_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ image_embeds: Optional[torch.FloatTensor] = None,
+ image_token_type_idx: Optional[int] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[BaseModelOutputWithPooling, Tuple[torch.FloatTensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import ViltProcessor, ViltModel
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> # prepare image and text
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "hello world"
+
+ >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
+ >>> model = ViltModel.from_pretrained("dandelin/vilt-b32-mlm")
+
+ >>> inputs = processor(image, text, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ text_batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones((text_batch_size, seq_length), device=device)
+
+ if pixel_values is not None and image_embeds is not None:
+ raise ValueError("You cannot specify both pixel_values and image_embeds at the same time")
+ elif pixel_values is None and image_embeds is None:
+ raise ValueError("You have to specify either pixel_values or image_embeds")
+
+ image_batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeds.shape[0]
+ if image_batch_size != text_batch_size:
+ raise ValueError("The text inputs and image inputs need to have the same batch size")
+ if pixel_mask is None:
+ pixel_mask = torch.ones((image_batch_size, self.config.image_size, self.config.image_size), device=device)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output, attention_mask = self.embeddings(
+ input_ids,
+ attention_mask,
+ token_type_ids,
+ pixel_values,
+ pixel_mask,
+ inputs_embeds,
+ image_embeds,
+ image_token_type_idx=image_token_type_idx,
+ )
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
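+
+ # Output shape note (illustrative): `last_hidden_state` covers the concatenated text and image streams, so
+ # its sequence length is text_seq_len + num_kept_patch_tokens + 1 (the extra token being the image CLS token
+ # prepended in `ViltEmbeddings.visual_embed`).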
+
+
+class ViltPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+@add_start_docstrings(
+ """
+ ViLT Model with a language modeling head on top as done during pretraining.
+ """,
+ VILT_START_DOCSTRING,
+)
+class ViltForMaskedLM(ViltPreTrainedModel):
+ _tied_weights_keys = ["mlm_score.decoder.weight", "mlm_score.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.vilt = ViltModel(config)
+ self.mlm_score = ViltMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.mlm_score.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.mlm_score.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ pixel_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ image_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import ViltProcessor, ViltForMaskedLM
+ >>> import requests
+ >>> from PIL import Image
+ >>> import re
+ >>> import torch
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "a bunch of [MASK] laying on a [MASK]."
+
+ >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
+ >>> model = ViltForMaskedLM.from_pretrained("dandelin/vilt-b32-mlm")
+
+ >>> # prepare inputs
+ >>> encoding = processor(image, text, return_tensors="pt")
+
+ >>> # forward pass
+ >>> outputs = model(**encoding)
+
+ >>> tl = len(re.findall(r"\[MASK\]", text))
+ >>> inferred_token = [text]
+
+ >>> # gradually fill in the MASK tokens, one by one
+ >>> with torch.no_grad():
+ ... for i in range(tl):
+ ... encoded = processor.tokenizer(inferred_token)
+ ... input_ids = torch.tensor(encoded.input_ids)
+ ... encoded = encoded["input_ids"][0][1:-1]
+ ... outputs = model(input_ids=input_ids, pixel_values=encoding.pixel_values)
+ ... mlm_logits = outputs.logits[0] # shape (seq_len, vocab_size)
+ ... # only take into account text features (minus CLS and SEP token)
+ ... mlm_logits = mlm_logits[1 : input_ids.shape[1] - 1, :]
+ ... mlm_values, mlm_ids = mlm_logits.softmax(dim=-1).max(dim=-1)
+ ... # only take into account text
+ ... mlm_values[torch.tensor(encoded) != 103] = 0
+ ... select = mlm_values.argmax().item()
+ ... encoded[select] = mlm_ids[select].item()
+ ... inferred_token = [processor.decode(encoded)]
+
+ >>> selected_token = ""
+ >>> encoded = processor.tokenizer(inferred_token)
+ >>> output = processor.decode(encoded.input_ids[0], skip_special_tokens=True)
+ >>> print(output)
+ a bunch of cats laying on a couch.
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.vilt(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ pixel_values=pixel_values,
+ pixel_mask=pixel_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ image_embeds=image_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output, pooled_output = outputs[:2]
+ # split up final hidden states into text and image features
+ text_seq_len = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+ text_features, _ = (sequence_output[:, :text_seq_len], sequence_output[:, text_seq_len:])
+
+ mlm_logits = self.mlm_score(text_features)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ # move labels to correct device to enable PP
+ labels = labels.to(mlm_logits.device)
+ masked_lm_loss = loss_fct(mlm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (mlm_logits,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=mlm_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class ViltPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+class ViltMLMHead(nn.Module):
+ def __init__(self, config, weight=None):
+ super().__init__()
+ self.config = config
+ self.transform = ViltPredictionHeadTransform(config)
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+ if weight is not None:
+ self.decoder.weight = weight
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, x):
+ x = self.transform(x)
+ x = self.decoder(x)
+ return x
+
+
+@add_start_docstrings(
+ """
+ Vilt Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the [CLS]
+ token) for visual question answering, e.g. for VQAv2.
+ """,
+ VILT_START_DOCSTRING,
+)
+class ViltForQuestionAnswering(ViltPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.vilt = ViltModel(config)
+
+ # Classifier head
+ self.classifier = nn.Sequential(
+ nn.Linear(config.hidden_size, config.hidden_size * 2),
+ nn.LayerNorm(config.hidden_size * 2),
+ nn.GELU(),
+ nn.Linear(config.hidden_size * 2, config.num_labels),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ pixel_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ image_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.FloatTensor` of shape `(batch_size, num_labels)`, *optional*):
+ Labels for computing the visual question answering loss. This tensor must be either a one-hot encoding of
+ all answers that are applicable for a given example in the batch, or a soft encoding indicating which
+ answers are applicable, where 1.0 is the highest score.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import ViltProcessor, ViltForQuestionAnswering
+ >>> import requests
+ >>> from PIL import Image
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> text = "How many cats are there?"
+
+ >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
+ >>> model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
+
+ >>> # prepare inputs
+ >>> encoding = processor(image, text, return_tensors="pt")
+
+ >>> # forward pass
+ >>> outputs = model(**encoding)
+ >>> logits = outputs.logits
+ >>> idx = logits.argmax(-1).item()
+ >>> print("Predicted answer:", model.config.id2label[idx])
+ Predicted answer: 2
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.vilt(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ pixel_values=pixel_values,
+ pixel_mask=pixel_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ image_embeds=image_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooler_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.classifier(pooler_output)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable PP
+ labels = labels.to(logits.device)
+ loss = nn.functional.binary_cross_entropy_with_logits(logits, labels) * labels.shape[1]
+ # see https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
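+ # Descriptive note: binary_cross_entropy_with_logits averages over every (example, answer-class) element,
+ # so multiplying by labels.shape[1] (the number of answer classes) turns that mean into a per-example sum
+ # over classes, following the reference implementation linked above.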
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Vilt Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the [CLS]
+ token) for image-to-text or text-to-image retrieval, e.g. MSCOCO and F30K.
+ """,
+ VILT_START_DOCSTRING,
+)
+class ViltForImageAndTextRetrieval(ViltPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.vilt = ViltModel(config)
+
+ # Classifier head
+ self.rank_output = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ pixel_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ image_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels are currently not supported.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import ViltProcessor, ViltForImageAndTextRetrieval
+ >>> import requests
+ >>> from PIL import Image
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+ >>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"]
+
+ >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-coco")
+ >>> model = ViltForImageAndTextRetrieval.from_pretrained("dandelin/vilt-b32-finetuned-coco")
+
+ >>> # forward pass
+ >>> scores = dict()
+ >>> for text in texts:
+ ... # prepare inputs
+ ... encoding = processor(image, text, return_tensors="pt")
+ ... outputs = model(**encoding)
+ ... scores[text] = outputs.logits[0, :].item()
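+
+        >>> # the raw logit from the rank head is a matching score: the higher, the better the image-text match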
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.vilt(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ pixel_values=pixel_values,
+ pixel_mask=pixel_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ image_embeds=image_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooler_output = outputs.pooler_output if return_dict else outputs[1]
+
+ logits = self.rank_output(pooler_output)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable PP
+ labels = labels.to(logits.device)
+ raise NotImplementedError("Training is not yet supported.")
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Vilt Model transformer with a classifier head on top for natural language visual reasoning, e.g. NLVR2.
+ """,
+ VILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING,
+)
+class ViltForImagesAndTextClassification(ViltPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.vilt = ViltModel(config)
+
+ # Classifier head
+ num_images = config.num_images
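+        # the pooled [CLS] output of every image pass is concatenated in `forward`, so the head operates on a
+        # vector of size hidden_size * num_images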
+ self.classifier = nn.Sequential(
+ nn.Linear(config.hidden_size * num_images, config.hidden_size * num_images),
+ nn.LayerNorm(config.hidden_size * num_images),
+ nn.GELU(),
+ nn.Linear(config.hidden_size * num_images, config.num_labels),
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ViltForImagesAndTextClassificationOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ pixel_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ image_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[ViltForImagesAndTextClassificationOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Binary classification labels.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import ViltProcessor, ViltForImagesAndTextClassification
+ >>> import requests
+ >>> from PIL import Image
+
+ >>> image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
+ >>> image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_1.jpg", stream=True).raw)
+ >>> text = "The left image contains twice the number of dogs as the right image."
+
+ >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2")
+ >>> model = ViltForImagesAndTextClassification.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2")
+
+ >>> # prepare inputs
+ >>> encoding = processor([image1, image2], text, return_tensors="pt")
+
+ >>> # forward pass
+ >>> outputs = model(input_ids=encoding.input_ids, pixel_values=encoding.pixel_values.unsqueeze(0))
+ >>> logits = outputs.logits
+ >>> idx = logits.argmax(-1).item()
+ >>> print("Predicted answer:", model.config.id2label[idx])
+ Predicted answer: True
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is not None and pixel_values.ndim == 4:
+ # add dummy num_images dimension
+ pixel_values = pixel_values.unsqueeze(1)
+
+ if image_embeds is not None and image_embeds.ndim == 3:
+ # add dummy num_images dimension
+ image_embeds = image_embeds.unsqueeze(1)
+
+ num_images = pixel_values.shape[1] if pixel_values is not None else None
+ if num_images is None:
+ num_images = image_embeds.shape[1] if image_embeds is not None else None
+ if num_images != self.config.num_images:
+ raise ValueError(
+ "Make sure to match the number of images in the model with the number of images in the input."
+ )
+ pooler_outputs = []
+ hidden_states = [] if output_hidden_states else None
+ attentions = [] if output_attentions else None
+ for i in range(num_images):
+ # forward every image through the model
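+            # each pass gets its own image_token_type_idx (i + 1) so the model can tell the images apart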
+ outputs = self.vilt(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ pixel_values=pixel_values[:, i, :, :, :] if pixel_values is not None else None,
+ pixel_mask=pixel_mask[:, i, :, :] if pixel_mask is not None else None,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ image_embeds=image_embeds[:, i, :, :] if image_embeds is not None else None,
+ image_token_type_idx=i + 1,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ pooler_output = outputs.pooler_output if return_dict else outputs[1]
+ pooler_outputs.append(pooler_output)
+ if output_hidden_states:
+ hidden_states.append(outputs.hidden_states)
+ if output_attentions:
+ attentions.append(outputs.attentions)
+
+ pooled_output = torch.cat(pooler_outputs, dim=-1)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ # move labels to correct device to enable PP
+ labels = labels.to(logits.device)
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits, hidden_states, attentions)
+ return ((loss,) + output) if loss is not None else output
+
+ return ViltForImagesAndTextClassificationOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=hidden_states,
+ attentions=attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ ViLT Model with a token classification head on top (a linear layer on top of the final hidden-states of the text
+ tokens) e.g. for Named-Entity-Recognition (NER) tasks.
+ """,
+ VILT_START_DOCSTRING,
+)
+class ViltForTokenClassification(ViltPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.vilt = ViltModel(config, add_pooling_layer=False)
+
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VILT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ pixel_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ image_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+
+ Returns:
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.vilt(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ pixel_values=pixel_values,
+ pixel_mask=pixel_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ image_embeds=image_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ text_input_size = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
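+        # the multimodal sequence starts with the text tokens followed by the image patches, so only the first
+        # `text_input_size` positions are classified below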
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output[:, :text_input_size])
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ # move labels to correct device to enable PP
+ labels = labels.to(logits.device)
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/processing_vilt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/processing_vilt.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ccb884ea00c9d1b9df3322281083ddf166e5dc9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/vilt/processing_vilt.py
@@ -0,0 +1,148 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for ViLT.
+"""
+
+import warnings
+from typing import List, Optional, Union
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from ...utils import TensorType
+
+
+class ViltProcessor(ProcessorMixin):
+ r"""
+ Constructs a ViLT processor which wraps a BERT tokenizer and ViLT image processor into a single processor.
+
+ [`ViltProcessor`] offers all the functionalities of [`ViltImageProcessor`] and [`BertTokenizerFast`]. See the
+ docstring of [`~ViltProcessor.__call__`] and [`~ViltProcessor.decode`] for more information.
+
+ Args:
+ image_processor (`ViltImageProcessor`, *optional*):
+ An instance of [`ViltImageProcessor`]. The image processor is a required input.
+ tokenizer (`BertTokenizerFast`, *optional*):
+            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "ViltImageProcessor"
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
+
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+ feature_extractor = None
+ if "feature_extractor" in kwargs:
+ warnings.warn(
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+ " instead.",
+ FutureWarning,
+ )
+ feature_extractor = kwargs.pop("feature_extractor")
+
+ image_processor = image_processor if image_processor is not None else feature_extractor
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+
+ def __call__(
+ self,
+ images,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+        This method uses [`ViltImageProcessor.__call__`] to prepare image(s) for the model, and
+        [`BertTokenizerFast.__call__`] to prepare text for the model.
+
+ Please refer to the docstring of the above two methods for more information.
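+
+        Example (a minimal sketch that reuses the checkpoint and image from the ViLT model docstrings; neither is a
+        requirement of this method):
+
+        ```python
+        >>> import requests
+        >>> from PIL import Image
+        >>> from transformers import ViltProcessor
+
+        >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-coco")
+
+        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+        >>> image = Image.open(requests.get(url, stream=True).raw)
+
+        >>> # one call tokenizes the text and computes pixel_values / pixel_mask for the image
+        >>> encoding = processor(image, "An image of two cats chilling on a couch", return_tensors="pt")
+        >>> # `encoding` now holds both the text features (input_ids, token_type_ids, attention_mask) and the image
+        >>> # features (pixel_values, pixel_mask) expected by the ViLT models
+        ```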
+ """
+ encoding = self.tokenizer(
+ text=text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+ # add pixel_values + pixel_mask
+ encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
+ encoding.update(encoding_image_processor)
+
+ return encoding
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+ @property
+ def feature_extractor_class(self):
+ warnings.warn(
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+ FutureWarning,
+ )
+ return self.image_processor_class
+
+ @property
+ def feature_extractor(self):
+ warnings.warn(
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
+ FutureWarning,
+ )
+ return self.image_processor
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dd57a90b92744f3fb2be5fc29fead5ee974021e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__init__.py
@@ -0,0 +1,105 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
+ "tokenization_xlm": ["XLMTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_xlm"] = [
+ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "XLMForMultipleChoice",
+ "XLMForQuestionAnswering",
+ "XLMForQuestionAnsweringSimple",
+ "XLMForSequenceClassification",
+ "XLMForTokenClassification",
+ "XLMModel",
+ "XLMPreTrainedModel",
+ "XLMWithLMHeadModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_xlm"] = [
+ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFXLMForMultipleChoice",
+ "TFXLMForQuestionAnsweringSimple",
+ "TFXLMForSequenceClassification",
+ "TFXLMForTokenClassification",
+ "TFXLMMainLayer",
+ "TFXLMModel",
+ "TFXLMPreTrainedModel",
+ "TFXLMWithLMHeadModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
+ from .tokenization_xlm import XLMTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_xlm import (
+ XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
+ XLMForMultipleChoice,
+ XLMForQuestionAnswering,
+ XLMForQuestionAnsweringSimple,
+ XLMForSequenceClassification,
+ XLMForTokenClassification,
+ XLMModel,
+ XLMPreTrainedModel,
+ XLMWithLMHeadModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_xlm import (
+ TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFXLMForMultipleChoice,
+ TFXLMForQuestionAnsweringSimple,
+ TFXLMForSequenceClassification,
+ TFXLMForTokenClassification,
+ TFXLMMainLayer,
+ TFXLMModel,
+ TFXLMPreTrainedModel,
+ TFXLMWithLMHeadModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfbf4b30400f3df0ae8b0bf4a1c7f4b8f41160d0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/configuration_xlm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/configuration_xlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94c01384f1be5f8d2ab028d920a21783f40805f7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/configuration_xlm.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/convert_xlm_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/convert_xlm_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22267157ca5af1ae7a36d8ac98b47ed8f2579924
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/convert_xlm_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/modeling_tf_xlm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/modeling_tf_xlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..427a3f423011d4f2e67b22507b54e6ea01b640af
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/modeling_tf_xlm.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/modeling_xlm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/modeling_xlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f49372924f01292d834c3e5cece623efc0028dd4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/modeling_xlm.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/tokenization_xlm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/tokenization_xlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5760f4d08fb3f02b89d552971a2fb7dd96cf62c5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/__pycache__/tokenization_xlm.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/modeling_tf_xlm.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/modeling_tf_xlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..45447a4236e118da6cd5b4bd156015c95cf84a1b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/modeling_tf_xlm.py
@@ -0,0 +1,1349 @@
+# coding=utf-8
+# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ TF 2.0 XLM model.
+"""
+
+
+from __future__ import annotations
+
+import itertools
+import warnings
+from dataclasses import dataclass
+from typing import Dict, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFSequenceSummary,
+ TFSharedEmbeddings,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ MULTIPLE_CHOICE_DUMMY_INPUTS,
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_xlm import XLMConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "FacebookAI/xlm-mlm-en-2048"
+_CONFIG_FOR_DOC = "XLMConfig"
+
+
+from ..deprecated._archive_maps import TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
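+# Kept for parity with the PyTorch implementation: TFXLMMainLayer below raises NotImplementedError when
+# `config.sinusoidal_embeddings` is set, so this helper is currently unused.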
+def create_sinusoidal_embeddings(n_pos, dim, out):
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
+ out[:, 0::2] = tf.constant(np.sin(position_enc[:, 0::2]))
+ out[:, 1::2] = tf.constant(np.cos(position_enc[:, 1::2]))
+
+
+def get_masks(slen, lengths, causal, padding_mask=None):
+ """
+    Generate the hidden-states mask and the attention mask (lower-triangular, i.e. causal, when `causal` is True).
+ """
+    bs = shape_list(lengths)[0]
+    # `alen` is also needed below to build the causal attention mask, so compute it unconditionally
+    alen = tf.range(slen, dtype=lengths.dtype)
+    if padding_mask is not None:
+        mask = padding_mask
+    else:
+        # assert lengths.max().item() <= slen
+        mask = alen < tf.expand_dims(lengths, axis=1)
+
+ # attention mask is the same as mask, or triangular inferior attention (causal)
+ if causal:
+ attn_mask = tf.less_equal(
+ tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))
+ )
+ else:
+ attn_mask = mask
+
+ # sanity check
+ # assert shape_list(mask) == [bs, slen]
+ tf.debugging.assert_equal(shape_list(mask), [bs, slen])
+ if causal:
+ tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen])
+
+ return mask, attn_mask
+
+
+class TFXLMMultiHeadAttention(keras.layers.Layer):
+ NEW_ID = itertools.count()
+
+ def __init__(self, n_heads, dim, config, **kwargs):
+ super().__init__(**kwargs)
+ self.layer_id = next(TFXLMMultiHeadAttention.NEW_ID)
+ self.dim = dim
+ self.n_heads = n_heads
+ self.output_attentions = config.output_attentions
+ assert self.dim % self.n_heads == 0
+
+ self.q_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="q_lin")
+ self.k_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="k_lin")
+ self.v_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="v_lin")
+ self.out_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="out_lin")
+ self.dropout = keras.layers.Dropout(config.attention_dropout)
+ self.pruned_heads = set()
+ self.dim = dim
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False):
+ """
+ Self-attention (if kv is None) or attention over source sentence (provided by kv).
+ """
+ # Input is (bs, qlen, dim)
+ # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
+ bs, qlen, dim = shape_list(input)
+
+ if kv is None:
+ klen = qlen if cache is None else cache["slen"] + qlen
+ else:
+ klen = shape_list(kv)[1]
+
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ dim_per_head = self.dim // self.n_heads
+ mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen)
+
+ def shape(x):
+ """projection"""
+ return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
+
+ def unshape(x):
+ """compute context"""
+ return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
+
+ q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
+
+ if kv is None:
+ k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
+ v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
+ elif cache is None or self.layer_id not in cache:
+ k = v = kv
+ k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
+ v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
+
+ if cache is not None:
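+            # reuse the keys/values cached from previous decoding steps, then store the updated ones for the next step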
+ if self.layer_id in cache:
+ if kv is None:
+ k_, v_ = cache[self.layer_id]
+ k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head)
+ v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head)
+ else:
+ k, v = cache[self.layer_id]
+
+ cache[self.layer_id] = (k, v)
+
+ f_dim_per_head = tf.cast(dim_per_head, dtype=q.dtype)
+ q = tf.multiply(q, tf.math.rsqrt(f_dim_per_head)) # (bs, n_heads, qlen, dim_per_head)
+ k = tf.cast(k, dtype=q.dtype)
+ scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen)
+ mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
+ # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)
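+        # TF equivalent of masked_fill_: subtract a large value at masked positions so they receive ~0 attention
+        # weight after the softmax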
+ mask = tf.cast(mask, dtype=scores.dtype)
+ scores = scores - 1e30 * (1.0 - mask)
+ weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
+ weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ weights = weights * head_mask
+
+ context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
+ context = unshape(context) # (bs, qlen, dim)
+ outputs = (self.out_lin(context),)
+
+ if output_attentions:
+ outputs = outputs + (weights,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_lin", None) is not None:
+ with tf.name_scope(self.q_lin.name):
+ self.q_lin.build([None, None, self.dim])
+ if getattr(self, "k_lin", None) is not None:
+ with tf.name_scope(self.k_lin.name):
+ self.k_lin.build([None, None, self.dim])
+ if getattr(self, "v_lin", None) is not None:
+ with tf.name_scope(self.v_lin.name):
+ self.v_lin.build([None, None, self.dim])
+ if getattr(self, "out_lin", None) is not None:
+ with tf.name_scope(self.out_lin.name):
+ self.out_lin.build([None, None, self.dim])
+
+
+class TFXLMTransformerFFN(keras.layers.Layer):
+ def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.lin1 = keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name="lin1")
+ self.lin2 = keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name="lin2")
+ self.act = get_tf_activation("gelu") if config.gelu_activation else get_tf_activation("relu")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.in_dim = in_dim
+ self.dim_hidden = dim_hidden
+
+ def call(self, input, training=False):
+ x = self.lin1(input)
+ x = self.act(x)
+ x = self.lin2(x)
+ x = self.dropout(x, training=training)
+
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "lin1", None) is not None:
+ with tf.name_scope(self.lin1.name):
+ self.lin1.build([None, None, self.in_dim])
+ if getattr(self, "lin2", None) is not None:
+ with tf.name_scope(self.lin2.name):
+ self.lin2.build([None, None, self.dim_hidden])
+
+
+@keras_serializable
+class TFXLMMainLayer(keras.layers.Layer):
+ config_class = XLMConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+ self.return_dict = config.use_return_dict
+
+ # encoder / decoder, output layer
+ self.is_encoder = config.is_encoder
+ self.is_decoder = not config.is_encoder
+
+ if self.is_decoder:
+ raise NotImplementedError("Currently XLM can only be used as an encoder")
+
+ # self.with_output = with_output
+ self.causal = config.causal
+
+ # dictionary / languages
+ self.n_langs = config.n_langs
+ self.use_lang_emb = config.use_lang_emb
+ self.n_words = config.n_words
+ self.eos_index = config.eos_index
+ self.pad_index = config.pad_index
+ # self.dico = dico
+ # self.id2lang = config.id2lang
+ # self.lang2id = config.lang2id
+ # assert len(self.dico) == self.n_words
+ # assert len(self.id2lang) == len(self.lang2id) == self.n_langs
+
+ # model parameters
+ self.dim = config.emb_dim # 512 by default
+ self.hidden_dim = self.dim * 4 # 2048 by default
+ self.n_heads = config.n_heads # 8 by default
+ self.n_layers = config.n_layers
+ self.max_position_embeddings = config.max_position_embeddings
+ self.embed_init_std = config.embed_init_std
+ if self.dim % self.n_heads != 0:
+ raise ValueError("transformer dim must be a multiple of n_heads")
+
+ # embeddings
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.attention_dropout = keras.layers.Dropout(config.attention_dropout)
+
+ if config.sinusoidal_embeddings:
+ raise NotImplementedError
+ # create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)
+
+ self.embeddings = TFSharedEmbeddings(
+ self.n_words, self.dim, initializer_range=config.embed_init_std, name="embeddings"
+ ) # padding_idx=self.pad_index)
+ self.layer_norm_emb = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm_emb")
+
+ # transformer layers
+ self.attentions = []
+ self.layer_norm1 = []
+ self.ffns = []
+ self.layer_norm2 = []
+ # if self.is_decoder:
+ # self.layer_norm15 = []
+ # self.encoder_attn = []
+
+ for i in range(self.n_layers):
+ self.attentions.append(
+ TFXLMMultiHeadAttention(self.n_heads, self.dim, config=config, name=f"attentions_._{i}")
+ )
+ self.layer_norm1.append(
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm1_._{i}")
+ )
+ # if self.is_decoder:
+ # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
+ # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
+ self.ffns.append(
+ TFXLMTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=f"ffns_._{i}")
+ )
+ self.layer_norm2.append(
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm2_._{i}")
+ )
+
+ if hasattr(config, "pruned_heads"):
+ pruned_heads = config.pruned_heads.copy().items()
+ config.pruned_heads = {}
+
+ for layer, heads in pruned_heads:
+ if self.attentions[int(layer)].n_heads == config.n_heads:
+ self.prune_heads({int(layer): list(map(int, heads))})
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.dim],
+ initializer=get_initializer(self.embed_init_std),
+ )
+
+ if self.n_langs > 1 and self.use_lang_emb:
+ with tf.name_scope("lang_embeddings"):
+ self.lang_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.n_langs, self.dim],
+ initializer=get_initializer(self.embed_init_std),
+ )
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "layer_norm_emb", None) is not None:
+ with tf.name_scope(self.layer_norm_emb.name):
+ self.layer_norm_emb.build([None, None, self.dim])
+ for layer in self.attentions:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+ for layer in self.layer_norm1:
+ with tf.name_scope(layer.name):
+ layer.build([None, None, self.dim])
+ for layer in self.ffns:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+ for layer in self.layer_norm2:
+ with tf.name_scope(layer.name):
+ layer.build([None, None, self.dim])
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ langs=None,
+ token_type_ids=None,
+ position_ids=None,
+ lengths=None,
+ cache=None,
+ head_mask=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ # removed: src_enc=None, src_len=None
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ bs, slen = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ bs, slen = shape_list(inputs_embeds)[:2]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if lengths is None:
+ if input_ids is not None:
+ lengths = tf.reduce_sum(
+ tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=input_ids.dtype), axis=1
+ )
+ else:
+ lengths = tf.convert_to_tensor([slen] * bs)
+ # mask = input_ids != self.pad_index
+
+ # check inputs
+ # assert shape_list(lengths)[0] == bs
+        tf.debugging.assert_equal(
+            shape_list(lengths)[0],
+            bs,
+            message=f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched",
+        )
+ # assert lengths.max().item() <= slen
+ # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
+ # assert (src_enc is None) == (src_len is None)
+ # if src_enc is not None:
+ # assert self.is_decoder
+ # assert src_enc.size(0) == bs
+
+ # generate masks
+ mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
+ # if self.is_decoder and src_enc is not None:
+ # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
+
+ # position_ids
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(slen), axis=0)
+ position_ids = tf.tile(position_ids, (bs, 1))
+
+ # assert shape_list(position_ids) == [bs, slen] # (slen, bs)
+        tf.debugging.assert_equal(
+            shape_list(position_ids),
+            [bs, slen],
+            message=f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched",
+        )
+ # position_ids = position_ids.transpose(0, 1)
+
+ # langs
+ if langs is not None:
+ # assert shape_list(langs) == [bs, slen] # (slen, bs)
+            tf.debugging.assert_equal(
+                shape_list(langs),
+                [bs, slen],
+                message=f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched",
+            )
+ # langs = langs.transpose(0, 1)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.n_layers
+
+ # do not recompute cached elements
+ if cache is not None and input_ids is not None:
+ _slen = slen - cache["slen"]
+ input_ids = input_ids[:, -_slen:]
+ position_ids = position_ids[:, -_slen:]
+ if langs is not None:
+ langs = langs[:, -_slen:]
+ mask = mask[:, -_slen:]
+ attn_mask = attn_mask[:, -_slen:]
+
+ # embeddings
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embeddings.vocab_size)
+ inputs_embeds = self.embeddings(input_ids)
+
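+        # sum token, position and (optionally) language / token-type embeddings before LayerNorm and dropout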
+ tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids)
+
+ if langs is not None and self.use_lang_emb and self.n_langs > 1:
+ tensor = tensor + tf.gather(self.lang_embeddings, langs)
+ if token_type_ids is not None:
+ tensor = tensor + self.embeddings(token_type_ids)
+
+ tensor = self.layer_norm_emb(tensor)
+ tensor = self.dropout(tensor, training=training)
+ mask = tf.cast(mask, dtype=tensor.dtype)
+ tensor = tensor * tf.expand_dims(mask, axis=-1)
+
+ # transformer layers
+ hidden_states = () if output_hidden_states else None
+ attentions = () if output_attentions else None
+
+ for i in range(self.n_layers):
+ if output_hidden_states:
+ hidden_states = hidden_states + (tensor,)
+
+ # self attention
+ attn_outputs = self.attentions[i](
+ tensor,
+ attn_mask,
+ None,
+ cache,
+ head_mask[i],
+ output_attentions,
+ training=training,
+ )
+ attn = attn_outputs[0]
+
+ if output_attentions:
+ attentions = attentions + (attn_outputs[1],)
+
+ attn = self.dropout(attn, training=training)
+ tensor = tensor + attn
+ tensor = self.layer_norm1[i](tensor)
+
+ # encoder attention (for decoder only)
+ # if self.is_decoder and src_enc is not None:
+ # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
+ # attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
+ # tensor = tensor + attn
+ # tensor = self.layer_norm15[i](tensor)
+
+ # FFN
+ tensor = tensor + self.ffns[i](tensor)
+ tensor = self.layer_norm2[i](tensor)
+ tensor = tensor * tf.expand_dims(mask, axis=-1)
+
+ # Add last hidden state
+ if output_hidden_states:
+ hidden_states = hidden_states + (tensor,)
+
+ # update cache length
+ if cache is not None:
+            cache["slen"] += shape_list(tensor)[1]
+
+ # move back sequence length to dimension 0
+ # tensor = tensor.transpose(0, 1)
+
+ if not return_dict:
+ return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
+
+ return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
+
+
+class TFXLMPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = XLMConfig
+ base_model_prefix = "transformer"
+
+ @property
+ def dummy_inputs(self):
+ # Sometimes XLM has language embeddings so don't forget to build them as well if needed
+ inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]], dtype=tf.int32)
+ attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32)
+ if self.config.use_lang_emb and self.config.n_langs > 1:
+ return {
+ "input_ids": inputs_list,
+ "attention_mask": attns_list,
+ "langs": tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32),
+ }
+ else:
+ return {"input_ids": inputs_list, "attention_mask": attns_list}
+
+
+# Remove when XLMWithLMHead computes loss like other LM models
+@dataclass
+class TFXLMWithLMHeadModelOutput(ModelOutput):
+ """
+ Base class for [`TFXLMWithLMHeadModel`] outputs.
+
+ Args:
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ logits: tf.Tensor = None
+ hidden_states: Tuple[tf.Tensor, ...] | None = None
+ attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+XLM_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`XLMConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+XLM_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ langs (`tf.Tensor` or `Numpy array` of shape `({0})`, *optional*):
+ A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
+ languages ids which can be obtained from the language names by using two conversion mappings provided in
+ the configuration of the model (only provided for multilingual models). More precisely, the *language name
+ to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
+ *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
+
+ See usage examples detailed in the [multilingual documentation](../multilingual).
+ token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ lengths (`tf.Tensor` or `Numpy array` of shape `(batch_size,)`, *optional*):
+ Length of each sentence that can be used to avoid performing attention on padding token indices. You can
+ also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
+ `[0, ..., input_ids.size(-1)]`.
+ cache (`Dict[str, tf.Tensor]`, *optional*):
+ Dictionary string to `tf.Tensor` that contains precomputed hidden states (key and values in the attention
+ blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential decoding.
+
+ The dictionary object will be modified in-place during the forward pass to add newly computed
+ hidden-states.
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare XLM Model transformer outputting raw hidden-states without any specific head on top.",
+ XLM_START_DOCSTRING,
+)
+class TFXLMModel(TFXLMPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFXLMMainLayer(config, name="transformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: tf.Tensor | None = None,
+ langs: tf.Tensor | None = None,
+ token_type_ids: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ lengths: tf.Tensor | None = None,
+ cache: Dict[str, tf.Tensor] | None = None,
+ head_mask: tf.Tensor | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ return_dict: bool | None = None,
+ training: bool = False,
+ ) -> TFBaseModelOutput | Tuple[tf.Tensor]:
+ outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+
+
+class TFXLMPredLayer(keras.layers.Layer):
+ """
+ Prediction layer (cross_entropy or adaptive_softmax).
+ """
+
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+
+ self.asm = config.asm
+ self.n_words = config.n_words
+ self.pad_index = config.pad_index
+
+ if config.asm is False:
+ self.input_embeddings = input_embeddings
+ else:
+ raise NotImplementedError
+ # self.proj = nn.AdaptiveLogSoftmaxWithLoss(
+ # in_features=dim,
+ # n_classes=config.n_words,
+ # cutoffs=config.asm_cutoffs,
+ # div_value=config.asm_div_value,
+ # head_bias=True, # default is False
+ # )
+
+ def build(self, input_shape):
+ # The output weights are the same as the input embeddings, but there is an output-only bias for each token.
+ self.bias = self.add_weight(shape=(self.n_words,), initializer="zeros", trainable=True, name="bias")
+
+ super().build(input_shape)
+
+ def get_output_embeddings(self):
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
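+        # project the hidden states back onto the vocabulary with the tied input embedding matrix, then add the
+        # output-only bias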
+ hidden_states = self.input_embeddings(hidden_states, mode="linear")
+ hidden_states = hidden_states + self.bias
+
+ return hidden_states
+
+
+@add_start_docstrings(
+ """
+ The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input
+ embeddings).
+ """,
+ XLM_START_DOCSTRING,
+)
+class TFXLMWithLMHeadModel(TFXLMPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFXLMMainLayer(config, name="transformer")
+ self.pred_layer = TFXLMPredLayer(config, self.transformer.embeddings, name="pred_layer_._proj")
+ # XLM does not have past caching features
+ self.supports_xla_generation = False
+
+ def get_lm_head(self):
+ return self.pred_layer
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.pred_layer.name
+
+ def prepare_inputs_for_generation(self, inputs, **kwargs):
+ mask_token_id = self.config.mask_token_id
+ lang_id = self.config.lang_id
+
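+        # MLM-style generation: append a <mask> token so that the language modeling head predicts the token at this
+        # final position on the next forward pass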
+ effective_batch_size = inputs.shape[0]
+ mask_token = tf.fill((effective_batch_size, 1), 1) * mask_token_id
+ inputs = tf.concat([inputs, mask_token], axis=1)
+
+ if lang_id is not None:
+ langs = tf.ones_like(inputs) * lang_id
+ else:
+ langs = None
+ return {"input_ids": inputs, "langs": langs}
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFXLMWithLMHeadModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ langs: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ lengths: np.ndarray | tf.Tensor | None = None,
+ cache: Optional[Dict[str, tf.Tensor]] = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFXLMWithLMHeadModelOutput, Tuple[tf.Tensor]]:
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ output = transformer_outputs[0]
+ outputs = self.pred_layer(output)
+
+ if not return_dict:
+ return (outputs,) + transformer_outputs[1:]
+
+ return TFXLMWithLMHeadModelOutput(
+ logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "pred_layer", None) is not None:
+ with tf.name_scope(self.pred_layer.name):
+ self.pred_layer.build(None)
+
+
+@add_start_docstrings(
+ """
+ XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g.
+ for GLUE tasks.
+ """,
+ XLM_START_DOCSTRING,
+)
+class TFXLMForSequenceClassification(TFXLMPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.transformer = TFXLMMainLayer(config, name="transformer")
+ self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ langs: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ lengths: np.ndarray | tf.Tensor | None = None,
+ cache: Optional[Dict[str, tf.Tensor]] = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+ """
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ output = transformer_outputs[0]
+
+ logits = self.sequence_summary(output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "sequence_summary", None) is not None:
+ with tf.name_scope(self.sequence_summary.name):
+ self.sequence_summary.build(None)
+
+
+@add_start_docstrings(
+ """
+ XLM Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ XLM_START_DOCSTRING,
+)
+class TFXLMForMultipleChoice(TFXLMPreTrainedModel, TFMultipleChoiceLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.transformer = TFXLMMainLayer(config, name="transformer")
+ self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary")
+ self.logits_proj = keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj"
+ )
+ self.config = config
+
+ @property
+ def dummy_inputs(self):
+ """
+ Dummy inputs to build the network.
+
+ Returns:
+ `Dict[str, tf.Tensor]` with dummy inputs
+ """
+ # Sometimes XLM has language embeddings so don't forget to build them as well if needed
+ if self.config.use_lang_emb and self.config.n_langs > 1:
+ return {
+ "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
+ "langs": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
+ }
+ else:
+ return {
+ "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
+ }
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ langs: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ lengths: np.ndarray | tf.Tensor | None = None,
+ cache: Optional[Dict[str, tf.Tensor]] = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
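+ # Multiple-choice inputs arrive as (batch_size, num_choices, seq_length); they are flattened to
+ # (batch_size * num_choices, seq_length) for the shared transformer and the logits are reshaped back below.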
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
+ flat_langs = tf.reshape(langs, (-1, seq_length)) if langs is not None else None
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+
+ if lengths is not None:
+ logger.warning(
+ "The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the "
+ "attention mask instead.",
+ )
+ lengths = None
+
+ transformer_outputs = self.transformer(
+ flat_input_ids,
+ flat_attention_mask,
+ flat_langs,
+ flat_token_type_ids,
+ flat_position_ids,
+ lengths,
+ cache,
+ head_mask,
+ flat_inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ output = transformer_outputs[0]
+ logits = self.sequence_summary(output)
+ logits = self.logits_proj(logits)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "sequence_summary", None) is not None:
+ with tf.name_scope(self.sequence_summary.name):
+ self.sequence_summary.build(None)
+ if getattr(self, "logits_proj", None) is not None:
+ with tf.name_scope(self.logits_proj.name):
+ self.logits_proj.build([None, None, self.config.num_labels])
+
+
+@add_start_docstrings(
+ """
+ XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ XLM_START_DOCSTRING,
+)
+class TFXLMForTokenClassification(TFXLMPreTrainedModel, TFTokenClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.transformer = TFXLMMainLayer(config, name="transformer")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.init_std), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ langs: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ lengths: np.ndarray | tf.Tensor | None = None,
+ cache: Optional[Dict[str, tf.Tensor]] = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = transformer_outputs[0]
+
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer
+ on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLM_START_DOCSTRING,
+)
+class TFXLMForQuestionAnsweringSimple(TFXLMPreTrainedModel, TFQuestionAnsweringLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFXLMMainLayer(config, name="transformer")
+ self.qa_outputs = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.init_std), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ langs: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ lengths: np.ndarray | tf.Tensor | None = None,
+ cache: Optional[Dict[str, tf.Tensor]] = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = transformer_outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/modeling_xlm.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/modeling_xlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..aca93ffb6a30b2aec2e3699020e997a42d2e9bcb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/xlm/modeling_xlm.py
@@ -0,0 +1,1264 @@
+# coding=utf-8
+# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ PyTorch XLM model.
+"""
+
+import itertools
+import math
+from dataclasses import dataclass
+from typing import Dict, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import gelu
+from ...modeling_outputs import (
+ BaseModelOutput,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_xlm import XLMConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "FacebookAI/xlm-mlm-en-2048"
+_CONFIG_FOR_DOC = "XLMConfig"
+
+
+from ..deprecated._archive_maps import XLM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def create_sinusoidal_embeddings(n_pos, dim, out):
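+ # Fixed sinusoidal position encodings: even dimensions receive sin(pos / 10000^(2i/dim)),
+ # odd dimensions receive the corresponding cos; the embedding weight is written in place and frozen.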
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
+ out.requires_grad = False
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
+
+
+def get_masks(slen, lengths, causal, padding_mask=None):
+ """
+ Generate hidden states mask, and optionally an attention mask.
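+
+ For a batch of `bs` sequences of length `slen`, `mask` has shape `(bs, slen)`; `attn_mask` equals `mask`
+ unless `causal` is True, in which case it is the lower-triangular mask of shape `(bs, slen, slen)`.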
+ """
+ alen = torch.arange(slen, dtype=torch.long, device=lengths.device)
+ if padding_mask is not None:
+ mask = padding_mask
+ else:
+ assert lengths.max().item() <= slen
+ mask = alen < lengths[:, None]
+
+ # attention mask is the same as mask, or triangular inferior attention (causal)
+ bs = lengths.size(0)
+ if causal:
+ attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]
+ else:
+ attn_mask = mask
+
+ # sanity check
+ assert mask.size() == (bs, slen)
+ assert causal is False or attn_mask.size() == (bs, slen, slen)
+
+ return mask, attn_mask
+
+
+class MultiHeadAttention(nn.Module):
+ NEW_ID = itertools.count()
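+ # Each instance draws a unique layer_id from this counter; it is the key under which this
+ # layer's keys/values are stored in the generation `cache` dict (see `forward` below).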
+
+ def __init__(self, n_heads, dim, config):
+ super().__init__()
+ self.layer_id = next(MultiHeadAttention.NEW_ID)
+ self.dim = dim
+ self.n_heads = n_heads
+ self.dropout = config.attention_dropout
+ assert self.dim % self.n_heads == 0
+
+ self.q_lin = nn.Linear(dim, dim)
+ self.k_lin = nn.Linear(dim, dim)
+ self.v_lin = nn.Linear(dim, dim)
+ self.out_lin = nn.Linear(dim, dim)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ attention_head_size = self.dim // self.n_heads
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads)
+ # Prune linear layers
+ self.q_lin = prune_linear_layer(self.q_lin, index)
+ self.k_lin = prune_linear_layer(self.k_lin, index)
+ self.v_lin = prune_linear_layer(self.v_lin, index)
+ self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.dim = attention_head_size * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False):
+ """
+ Self-attention (if kv is None) or attention over source sentence (provided by kv).
+ """
+ # Input is (bs, qlen, dim)
+ # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
+ bs, qlen, dim = input.size()
+ if kv is None:
+ klen = qlen if cache is None else cache["slen"] + qlen
+ else:
+ klen = kv.size(1)
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ n_heads = self.n_heads
+ dim_per_head = self.dim // n_heads
+ mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
+
+ def shape(x):
+ """projection"""
+ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
+
+ def unshape(x):
+ """compute context"""
+ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
+
+ q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
+ if kv is None:
+ k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
+ v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
+ elif cache is None or self.layer_id not in cache:
+ k = v = kv
+ k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
+ v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
+
+ if cache is not None:
+ if self.layer_id in cache:
+ if kv is None:
+ k_, v_ = cache[self.layer_id]
+ k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
+ v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
+ else:
+ k, v = cache[self.layer_id]
+ cache[self.layer_id] = (k, v)
+
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)
+ scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)
+ mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)
+ scores.masked_fill_(mask, torch.finfo(scores.dtype).min) # (bs, n_heads, qlen, klen)
+
+ weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
+ weights = nn.functional.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ weights = weights * head_mask
+
+ context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
+ context = unshape(context) # (bs, qlen, dim)
+
+ outputs = (self.out_lin(context),)
+ if output_attentions:
+ outputs = outputs + (weights,)
+ return outputs
+
+
+class TransformerFFN(nn.Module):
+ def __init__(self, in_dim, dim_hidden, out_dim, config):
+ super().__init__()
+ self.dropout = config.dropout
+ self.lin1 = nn.Linear(in_dim, dim_hidden)
+ self.lin2 = nn.Linear(dim_hidden, out_dim)
+ self.act = gelu if config.gelu_activation else nn.functional.relu
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+
+ def forward(self, input):
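+ # Run the FFN over the sequence dimension (dim 1) in chunks of config.chunk_size_feed_forward
+ # (no chunking when it is 0), which reduces peak activation memory.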
+ return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
+
+ def ff_chunk(self, input):
+ x = self.lin1(input)
+ x = self.act(x)
+ x = self.lin2(x)
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ return x
+
+
+class XLMPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = XLMConfig
+ load_tf_weights = None
+ base_model_prefix = "transformer"
+
+ def __init__(self, *inputs, **kwargs):
+ super().__init__(*inputs, **kwargs)
+
+ @property
+ def dummy_inputs(self):
+ inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
+ attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
+ if self.config.use_lang_emb and self.config.n_langs > 1:
+ langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
+ else:
+ langs_list = None
+ return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list}
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Embedding):
+ if self.config is not None and self.config.embed_init_std is not None:
+ nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ if isinstance(module, nn.Linear):
+ if self.config is not None and self.config.init_std is not None:
+ nn.init.normal_(module.weight, mean=0, std=self.config.init_std)
+ if module.bias is not None:
+ nn.init.constant_(module.bias, 0.0)
+ if isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ if isinstance(module, XLMModel) and self.config.sinusoidal_embeddings:
+ create_sinusoidal_embeddings(
+ self.config.max_position_embeddings, self.config.emb_dim, out=module.position_embeddings.weight
+ )
+
+
+@dataclass
+class XLMForQuestionAnsweringOutput(ModelOutput):
+ """
+ Base class for outputs of question answering models using a `SQuADHead`.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
+ Classification loss as the sum of start token, end token (and is_impossible if provided) classification
+ losses.
+ start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Log probabilities for the top config.start_n_top start token possibilities (beam-search).
+ start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Indices for the top config.start_n_top start token possibilities (beam-search).
+ end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
+ (beam-search).
+ end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
+ cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
+ Log probabilities for the `is_impossible` label of the answers.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ start_top_log_probs: Optional[torch.FloatTensor] = None
+ start_top_index: Optional[torch.LongTensor] = None
+ end_top_log_probs: Optional[torch.FloatTensor] = None
+ end_top_index: Optional[torch.LongTensor] = None
+ cls_logits: Optional[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+XLM_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`XLMConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+XLM_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ langs (`torch.LongTensor` of shape `({0})`, *optional*):
+ A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
+ language ids which can be obtained from the language names by using two conversion mappings provided in
+ the configuration of the model (only provided for multilingual models). More precisely, the *language name
+ to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
+ *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
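+ For example, on a multilingual checkpoint a `langs` tensor for English inputs would typically be filled
+ with `model.config.lang2id["en"]`.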
+
+ See usage examples detailed in the [multilingual documentation](../multilingual).
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Length of each sentence that can be used to avoid performing attention on padding token indices. You can
+ also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
+ `[0, ..., input_ids.size(-1)]`.
+ cache (`Dict[str, torch.FloatTensor]`, *optional*):
+ Dictionary string to `torch.FloatTensor` that contains precomputed hidden states (key and values in the
+ attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
+ decoding.
+
+ The dictionary object will be modified in-place during the forward pass to add newly computed
+ hidden-states.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare XLM Model transformer outputting raw hidden-states without any specific head on top.",
+ XLM_START_DOCSTRING,
+)
+class XLMModel(XLMPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ # encoder / decoder, output layer
+ self.is_encoder = config.is_encoder
+ self.is_decoder = not config.is_encoder
+ if self.is_decoder:
+ raise NotImplementedError("Currently XLM can only be used as an encoder")
+ # self.with_output = with_output
+ self.causal = config.causal
+
+ # dictionary / languages
+ self.n_langs = config.n_langs
+ self.use_lang_emb = config.use_lang_emb
+ self.n_words = config.n_words
+ self.eos_index = config.eos_index
+ self.pad_index = config.pad_index
+ # self.dico = dico
+ # self.id2lang = config.id2lang
+ # self.lang2id = config.lang2id
+ # assert len(self.dico) == self.n_words
+ # assert len(self.id2lang) == len(self.lang2id) == self.n_langs
+
+ # model parameters
+ self.dim = config.emb_dim # 512 by default
+ self.hidden_dim = self.dim * 4 # 2048 by default
+ self.n_heads = config.n_heads # 8 by default
+ self.n_layers = config.n_layers
+ self.dropout = config.dropout
+ self.attention_dropout = config.attention_dropout
+ assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads"
+
+ # embeddings
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
+ if config.n_langs > 1 and config.use_lang_emb:
+ self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
+ self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
+ self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
+
+ # transformer layers
+ self.attentions = nn.ModuleList()
+ self.layer_norm1 = nn.ModuleList()
+ self.ffns = nn.ModuleList()
+ self.layer_norm2 = nn.ModuleList()
+ # if self.is_decoder:
+ # self.layer_norm15 = nn.ModuleList()
+ # self.encoder_attn = nn.ModuleList()
+
+ for _ in range(self.n_layers):
+ self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))
+ self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
+ # if self.is_decoder:
+ # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
+ # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
+ self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
+ self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
+
+ if hasattr(config, "pruned_heads"):
+ pruned_heads = config.pruned_heads.copy().items()
+ config.pruned_heads = {}
+ for layer, heads in pruned_heads:
+ if self.attentions[int(layer)].n_heads == config.n_heads:
+ self.prune_heads({int(layer): list(map(int, heads))})
+
+ # Initialize weights and apply final processing
+ self.post_init()
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, new_embeddings):
+ self.embeddings = new_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.attentions[layer].prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ langs: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ lengths: Optional[torch.Tensor] = None,
+ cache: Optional[Dict[str, torch.Tensor]] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None:
+ bs, slen = input_ids.size()
+ else:
+ bs, slen = inputs_embeds.size()[:-1]
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if lengths is None:
+ if input_ids is not None:
+ lengths = (input_ids != self.pad_index).sum(dim=1).long()
+ else:
+ lengths = torch.tensor([slen] * bs, device=device)
+ # mask = input_ids != self.pad_index
+
+ # check inputs
+ assert lengths.size(0) == bs
+ assert lengths.max().item() <= slen
+ # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
+ # assert (src_enc is None) == (src_len is None)
+ # if src_enc is not None:
+ # assert self.is_decoder
+ # assert src_enc.size(0) == bs
+
+ # generate masks
+ mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
+ # if self.is_decoder and src_enc is not None:
+ # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
+
+ # position_ids
+ if position_ids is None:
+ position_ids = self.position_ids[:, :slen]
+ else:
+ assert position_ids.size() == (bs, slen) # (slen, bs)
+ # position_ids = position_ids.transpose(0, 1)
+
+ # langs
+ if langs is not None:
+ assert langs.size() == (bs, slen) # (slen, bs)
+ # langs = langs.transpose(0, 1)
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.n_layers)
+
+ # do not recompute cached elements
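+ # only the positions that are not already covered by the cache are fed through the layers;
+ # `cache["slen"]` tracks how many positions the cached keys/values span.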
+ if cache is not None and input_ids is not None:
+ _slen = slen - cache["slen"]
+ input_ids = input_ids[:, -_slen:]
+ position_ids = position_ids[:, -_slen:]
+ if langs is not None:
+ langs = langs[:, -_slen:]
+ mask = mask[:, -_slen:]
+ attn_mask = attn_mask[:, -_slen:]
+
+ # embeddings
+ if inputs_embeds is None:
+ inputs_embeds = self.embeddings(input_ids)
+
+ tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
+ if langs is not None and self.use_lang_emb and self.n_langs > 1:
+ tensor = tensor + self.lang_embeddings(langs)
+ if token_type_ids is not None:
+ tensor = tensor + self.embeddings(token_type_ids)
+ tensor = self.layer_norm_emb(tensor)
+ tensor = nn.functional.dropout(tensor, p=self.dropout, training=self.training)
+ tensor *= mask.unsqueeze(-1).to(tensor.dtype)
+
+ # transformer layers
+ hidden_states = () if output_hidden_states else None
+ attentions = () if output_attentions else None
+ for i in range(self.n_layers):
+ if output_hidden_states:
+ hidden_states = hidden_states + (tensor,)
+
+ # self attention
+ attn_outputs = self.attentions[i](
+ tensor,
+ attn_mask,
+ cache=cache,
+ head_mask=head_mask[i],
+ output_attentions=output_attentions,
+ )
+ attn = attn_outputs[0]
+ if output_attentions:
+ attentions = attentions + (attn_outputs[1],)
+ attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
+ tensor = tensor + attn
+ tensor = self.layer_norm1[i](tensor)
+
+ # encoder attention (for decoder only)
+ # if self.is_decoder and src_enc is not None:
+ # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
+ # attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
+ # tensor = tensor + attn
+ # tensor = self.layer_norm15[i](tensor)
+
+ # FFN
+ tensor = tensor + self.ffns[i](tensor)
+ tensor = self.layer_norm2[i](tensor)
+ tensor *= mask.unsqueeze(-1).to(tensor.dtype)
+
+ # Add last hidden state
+ if output_hidden_states:
+ hidden_states = hidden_states + (tensor,)
+
+ # update cache length
+ if cache is not None:
+ cache["slen"] += tensor.size(1)
+
+ # move back sequence length to dimension 0
+ # tensor = tensor.transpose(0, 1)
+
+ if not return_dict:
+ return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
+ return BaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
+
+
+class XLMPredLayer(nn.Module):
+ """
+ Prediction layer (cross_entropy or adaptive_softmax).
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.asm = config.asm
+ self.n_words = config.n_words
+ self.pad_index = config.pad_index
+ dim = config.emb_dim
+
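+ # `asm` switches the output layer to an adaptive softmax (nn.AdaptiveLogSoftmaxWithLoss),
+ # which is cheaper for very large vocabularies than a full projection over n_words.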
+ if config.asm is False:
+ self.proj = nn.Linear(dim, config.n_words, bias=True)
+ else:
+ self.proj = nn.AdaptiveLogSoftmaxWithLoss(
+ in_features=dim,
+ n_classes=config.n_words,
+ cutoffs=config.asm_cutoffs,
+ div_value=config.asm_div_value,
+ head_bias=True, # default is False
+ )
+
+ def forward(self, x, y=None):
+ """Compute the loss, and optionally the scores."""
+ outputs = ()
+ if self.asm is False:
+ scores = self.proj(x)
+ outputs = (scores,) + outputs
+ if y is not None:
+ loss = nn.functional.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction="mean")
+ outputs = (loss,) + outputs
+ else:
+ scores = self.proj.log_prob(x)
+ outputs = (scores,) + outputs
+ if y is not None:
+ _, loss = self.proj(x, y)
+ outputs = (loss,) + outputs
+
+ return outputs
+
+
+@add_start_docstrings(
+ """
+ The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input
+ embeddings).
+ """,
+ XLM_START_DOCSTRING,
+)
+class XLMWithLMHeadModel(XLMPreTrainedModel):
+ _tied_weights_keys = ["pred_layer.proj.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = XLMModel(config)
+ self.pred_layer = XLMPredLayer(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.pred_layer.proj
+
+ def set_output_embeddings(self, new_embeddings):
+ self.pred_layer.proj = new_embeddings
+
+ def prepare_inputs_for_generation(self, input_ids, **kwargs):
+ mask_token_id = self.config.mask_token_id
+ lang_id = self.config.lang_id
+
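+ # MLM-style generation: a <mask> token (and, for multilingual checkpoints, a matching
+ # language id) is appended so that the LM head predicts the token at that position.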
+ effective_batch_size = input_ids.shape[0]
+ mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device)
+ input_ids = torch.cat([input_ids, mask_token], dim=1)
+ if lang_id is not None:
+ langs = torch.full_like(input_ids, lang_id)
+ else:
+ langs = None
+ return {"input_ids": input_ids, "langs": langs}
+
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ mask="",
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ langs: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ lengths: Optional[torch.Tensor] = None,
+ cache: Optional[Dict[str, torch.Tensor]] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ output = transformer_outputs[0]
+ outputs = self.pred_layer(output, labels) # (loss, logits) or (logits,) depending on if labels are provided.
+
+ if not return_dict:
+ return outputs + transformer_outputs[1:]
+
+ return MaskedLMOutput(
+ loss=outputs[0] if labels is not None else None,
+ logits=outputs[0] if labels is None else outputs[1],
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g.
+ for GLUE tasks.
+ """,
+ XLM_START_DOCSTRING,
+)
+class XLMForSequenceClassification(XLMPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.transformer = XLMModel(config)
+ self.sequence_summary = SequenceSummary(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ langs: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ lengths: Optional[torch.Tensor] = None,
+ cache: Optional[Dict[str, torch.Tensor]] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ output = transformer_outputs[0]
+ logits = self.sequence_summary(output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLM_START_DOCSTRING,
+)
+class XLMForQuestionAnsweringSimple(XLMPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.transformer = XLMModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ langs: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ lengths: Optional[torch.Tensor] = None,
+ cache: Optional[Dict[str, torch.Tensor]] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = transformer_outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, split add a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + transformer_outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ XLM_START_DOCSTRING,
+)
+class XLMForQuestionAnswering(XLMPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.transformer = XLMModel(config)
+ self.qa_outputs = SQuADHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=XLMForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ langs: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ lengths: Optional[torch.Tensor] = None,
+ cache: Optional[Dict[str, torch.Tensor]] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ is_impossible: Optional[torch.Tensor] = None,
+ cls_index: Optional[torch.Tensor] = None,
+ p_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, XLMForQuestionAnsweringOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels indicating whether a question has an answer or no answer (SQuAD 2.0).
+ cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the classification token to use as input for computing plausibility of the
+ answer.
+ p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means the token should be
+ masked, 0.0 means the token is not masked.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMForQuestionAnswering
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
+ >>> model = XLMForQuestionAnswering.from_pretrained("FacebookAI/xlm-mlm-en-2048")
+
+ >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
+ ... 0
+ ... ) # Batch size 1
+ >>> start_positions = torch.tensor([1])
+ >>> end_positions = torch.tensor([3])
+
+ >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
+ >>> loss = outputs.loss
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ output = transformer_outputs[0]
+
+ outputs = self.qa_outputs(
+ output,
+ start_positions=start_positions,
+ end_positions=end_positions,
+ cls_index=cls_index,
+ is_impossible=is_impossible,
+ p_mask=p_mask,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return outputs + transformer_outputs[1:]
+
+ return XLMForQuestionAnsweringOutput(
+ loss=outputs.loss,
+ start_top_log_probs=outputs.start_top_log_probs,
+ start_top_index=outputs.start_top_index,
+ end_top_log_probs=outputs.end_top_log_probs,
+ end_top_index=outputs.end_top_index,
+ cls_logits=outputs.cls_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ XLM_START_DOCSTRING,
+)
+class XLMForTokenClassification(XLMPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.transformer = XLMModel(config)
+ self.dropout = nn.Dropout(config.dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ langs: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ lengths: Optional[torch.Tensor] = None,
+ cache: Optional[Dict[str, torch.Tensor]] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
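+
+ Example (a minimal sketch: `FacebookAI/xlm-mlm-en-2048` is reused from the question-answering example above
+ and ships without a fine-tuned token-classification head, so the dummy all-zero labels below only
+ illustrate the expected call signature and shapes):
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMForTokenClassification
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
+ >>> model = XLMForTokenClassification.from_pretrained("FacebookAI/xlm-mlm-en-2048")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> labels = torch.zeros_like(inputs["input_ids"])  # one label id per token, batch size 1
+
+ >>> outputs = model(**inputs, labels=labels)
+ >>> loss, logits = outputs.loss, outputs.logits  # logits shape: (1, sequence_length, config.num_labels)
+ ```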
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
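+ # Token-level cross-entropy over logits flattened to `(batch_size * sequence_length, num_labels)`.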
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ XLM Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ XLM_START_DOCSTRING,
+)
+class XLMForMultipleChoice(XLMPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.transformer = XLMModel(config)
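+ # `sequence_summary` pools the transformer output into one vector per sequence (projected to
+ # `config.num_labels` under the default XLM summary settings); `logits_proj` then collapses it to a
+ # single score per (prompt, choice) pair.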
+ self.sequence_summary = SequenceSummary(config)
+ self.logits_proj = nn.Linear(config.num_labels, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ langs: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ lengths: Optional[torch.Tensor] = None,
+ cache: Optional[Dict[str, torch.Tensor]] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
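+
+ Example (a minimal sketch: the `FacebookAI/xlm-mlm-en-2048` checkpoint from the question-answering example
+ above carries no fine-tuned multiple-choice head, so the scores below come from a randomly initialized
+ head and only illustrate the expected input/output shapes):
+
+ ```python
+ >>> from transformers import AutoTokenizer, XLMForMultipleChoice
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
+ >>> model = XLMForMultipleChoice.from_pretrained("FacebookAI/xlm-mlm-en-2048")
+
+ >>> prompt = "In Italy, pizza served in formal settings is presented unsliced."
+ >>> choice0 = "It is eaten with a fork and a knife."
+ >>> choice1 = "It is eaten while held in the hand."
+
+ >>> # encode each (prompt, choice) pair, then add a batch dimension of 1
+ >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
+ >>> inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}
+ >>> labels = torch.tensor(0).unsqueeze(0)  # choice0 is the correct answer
+
+ >>> outputs = model(**inputs, labels=labels)
+ >>> loss, logits = outputs.loss, outputs.logits  # logits shape: (1, 2)
+ ```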
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
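+ # Flatten `(batch_size, num_choices, ...)` inputs to `(batch_size * num_choices, ...)` so that each choice
+ # is encoded as its own sequence; the per-choice scores are regrouped into `(batch_size, num_choices)` below.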
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ langs = langs.view(-1, langs.size(-1)) if langs is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ if lengths is not None:
+ logger.warning(
+ "The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the "
+ "attention mask instead."
+ )
+ lengths = None
+
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ langs=langs,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ lengths=lengths,
+ cache=cache,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ output = transformer_outputs[0]
+ logits = self.sequence_summary(output)
+ logits = self.logits_proj(logits)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )