diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c4038527c6bac13f7bb6e9958462434ceac8271
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a569c3cc54bff82307d995f8bec52b9710279765
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+
+
+_import_structure = {"tokenization_bert_japanese": ["BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer"]}
+
+
+if TYPE_CHECKING:
+ from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
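+
+# Illustrative usage sketch (assuming the optional MeCab dependencies such as `fugashi` and
+# `ipadic` are installed): because of the `_LazyModule` above, the tokenization submodule is
+# only imported when one of its names is first accessed, e.g.
+#
+#     from transformers.models.bert_japanese import BertJapaneseTokenizer
+#     tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")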
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ccd3516f284a27c8f2d8b29dffe3f388529f2bc2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/tokenization_bert_japanese.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/tokenization_bert_japanese.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bcb3a901d8f0f43dd737575b0073718e59831558
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/tokenization_bert_japanese.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/tokenization_bert_japanese.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/tokenization_bert_japanese.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2d1ac195801915f430cc85b7c076d9770625eae
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/bert_japanese/tokenization_bert_japanese.py
@@ -0,0 +1,1028 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes."""
+
+
+import collections
+import copy
+import os
+import unicodedata
+from typing import Any, Dict, List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import is_sentencepiece_available, is_sudachi_projection_available, logging
+
+
+if is_sentencepiece_available():
+ import sentencepiece as spm
+else:
+ spm = None
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "spm_file": "spiece.model"}
+
+SPIECE_UNDERLINE = "▁"
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/vocab.txt",
+ "cl-tohoku/bert-base-japanese-whole-word-masking": (
+ "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/vocab.txt"
+ ),
+ "cl-tohoku/bert-base-japanese-char": (
+ "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/vocab.txt"
+ ),
+ "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
+ "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/vocab.txt"
+ ),
+ }
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "cl-tohoku/bert-base-japanese": 512,
+ "cl-tohoku/bert-base-japanese-whole-word-masking": 512,
+ "cl-tohoku/bert-base-japanese-char": 512,
+ "cl-tohoku/bert-base-japanese-char-whole-word-masking": 512,
+}
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "cl-tohoku/bert-base-japanese": {
+ "do_lower_case": False,
+ "word_tokenizer_type": "mecab",
+ "subword_tokenizer_type": "wordpiece",
+ },
+ "cl-tohoku/bert-base-japanese-whole-word-masking": {
+ "do_lower_case": False,
+ "word_tokenizer_type": "mecab",
+ "subword_tokenizer_type": "wordpiece",
+ },
+ "cl-tohoku/bert-base-japanese-char": {
+ "do_lower_case": False,
+ "word_tokenizer_type": "mecab",
+ "subword_tokenizer_type": "character",
+ },
+ "cl-tohoku/bert-base-japanese-char-whole-word-masking": {
+ "do_lower_case": False,
+ "word_tokenizer_type": "mecab",
+ "subword_tokenizer_type": "character",
+ },
+}
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+class BertJapaneseTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a BERT tokenizer for Japanese text.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
+    to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to a one-wordpiece-per-line vocabulary file.
+ spm_file (`str`, *optional*):
+ Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm or .model
+ extension) that contains the vocabulary.
+        do_lower_case (`bool`, *optional*, defaults to `False`):
+            Whether to lower case the input. Only has an effect when `do_word_tokenize=True`.
+ do_word_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether to do word tokenization.
+ do_subword_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether to do subword tokenization.
+ word_tokenizer_type (`str`, *optional*, defaults to `"basic"`):
+ Type of word tokenizer. Choose from ["basic", "mecab", "sudachi", "jumanpp"].
+ subword_tokenizer_type (`str`, *optional*, defaults to `"wordpiece"`):
+            Type of subword tokenizer. Choose from ["wordpiece", "character", "sentencepiece"].
+ mecab_kwargs (`dict`, *optional*):
+ Dictionary passed to the `MecabTokenizer` constructor.
+ sudachi_kwargs (`dict`, *optional*):
+ Dictionary passed to the `SudachiTokenizer` constructor.
+ jumanpp_kwargs (`dict`, *optional*):
+ Dictionary passed to the `JumanppTokenizer` constructor.
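+
+    Example (a minimal usage sketch, assuming the `cl-tohoku/bert-base-japanese` checkpoint and the optional
+    MeCab dependencies `fugashi` and `ipadic` are installed):
+
+    ```python
+    >>> from transformers import BertJapaneseTokenizer
+
+    >>> tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
+    >>> # MeCab word segmentation followed by WordPiece subword splitting
+    >>> tokens = tokenizer.tokenize("吾輩は猫である。")
+    >>> ids = tokenizer.convert_tokens_to_ids(tokens)
+    ```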
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+
+ def __init__(
+ self,
+ vocab_file,
+ spm_file=None,
+ do_lower_case=False,
+ do_word_tokenize=True,
+ do_subword_tokenize=True,
+ word_tokenizer_type="basic",
+ subword_tokenizer_type="wordpiece",
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ mecab_kwargs=None,
+ sudachi_kwargs=None,
+ jumanpp_kwargs=None,
+ **kwargs,
+ ):
+ if subword_tokenizer_type == "sentencepiece":
+ if not os.path.isfile(spm_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{spm_file}'. To load the vocabulary from a Google"
+ " pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.spm_file = spm_file
+ else:
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google"
+ " pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+
+ self.do_word_tokenize = do_word_tokenize
+ self.word_tokenizer_type = word_tokenizer_type
+ self.lower_case = do_lower_case
+ self.never_split = never_split
+ self.mecab_kwargs = copy.deepcopy(mecab_kwargs)
+ self.sudachi_kwargs = copy.deepcopy(sudachi_kwargs)
+ self.jumanpp_kwargs = copy.deepcopy(jumanpp_kwargs)
+ if do_word_tokenize:
+ if word_tokenizer_type == "basic":
+ self.word_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=False
+ )
+ elif word_tokenizer_type == "mecab":
+ self.word_tokenizer = MecabTokenizer(
+ do_lower_case=do_lower_case, never_split=never_split, **(mecab_kwargs or {})
+ )
+ elif word_tokenizer_type == "sudachi":
+ self.word_tokenizer = SudachiTokenizer(
+ do_lower_case=do_lower_case, never_split=never_split, **(sudachi_kwargs or {})
+ )
+ elif word_tokenizer_type == "jumanpp":
+ self.word_tokenizer = JumanppTokenizer(
+ do_lower_case=do_lower_case, never_split=never_split, **(jumanpp_kwargs or {})
+ )
+ else:
+ raise ValueError(f"Invalid word_tokenizer_type '{word_tokenizer_type}' is specified.")
+
+ self.do_subword_tokenize = do_subword_tokenize
+ self.subword_tokenizer_type = subword_tokenizer_type
+ if do_subword_tokenize:
+ if subword_tokenizer_type == "wordpiece":
+ self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+ elif subword_tokenizer_type == "character":
+ self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+ elif subword_tokenizer_type == "sentencepiece":
+ self.subword_tokenizer = SentencepieceTokenizer(vocab=self.spm_file, unk_token=str(unk_token))
+ else:
+ raise ValueError(f"Invalid subword_tokenizer_type '{subword_tokenizer_type}' is specified.")
+ super().__init__(
+ spm_file=spm_file,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ do_lower_case=do_lower_case,
+ do_word_tokenize=do_word_tokenize,
+ do_subword_tokenize=do_subword_tokenize,
+ word_tokenizer_type=word_tokenizer_type,
+ subword_tokenizer_type=subword_tokenizer_type,
+ never_split=never_split,
+ mecab_kwargs=mecab_kwargs,
+ sudachi_kwargs=sudachi_kwargs,
+ jumanpp_kwargs=jumanpp_kwargs,
+ **kwargs,
+ )
+
+ @property
+ def do_lower_case(self):
+ return self.lower_case
+
+ def __getstate__(self):
+ state = dict(self.__dict__)
+ if self.word_tokenizer_type in ["mecab", "sudachi", "jumanpp"]:
+ del state["word_tokenizer"]
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__ = state
+ if self.word_tokenizer_type == "mecab":
+ self.word_tokenizer = MecabTokenizer(
+ do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.mecab_kwargs or {})
+ )
+ elif self.word_tokenizer_type == "sudachi":
+ self.word_tokenizer = SudachiTokenizer(
+ do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.sudachi_kwargs or {})
+ )
+ elif self.word_tokenizer_type == "jumanpp":
+ self.word_tokenizer = JumanppTokenizer(
+ do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.jumanpp_kwargs or {})
+ )
+
+ def _tokenize(self, text):
+ if self.do_word_tokenize:
+ tokens = self.word_tokenizer.tokenize(text, never_split=self.all_special_tokens)
+ else:
+ tokens = [text]
+
+ if self.do_subword_tokenize:
+ split_tokens = [sub_token for token in tokens for sub_token in self.subword_tokenizer.tokenize(token)]
+ else:
+ split_tokens = tokens
+
+ return split_tokens
+
+ @property
+ def vocab_size(self):
+ if self.subword_tokenizer_type == "sentencepiece":
+ return len(self.subword_tokenizer.sp_model)
+ return len(self.vocab)
+
+ def get_vocab(self):
+ if self.subword_tokenizer_type == "sentencepiece":
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if self.subword_tokenizer_type == "sentencepiece":
+ return self.subword_tokenizer.sp_model.PieceToId(token)
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ if self.subword_tokenizer_type == "sentencepiece":
+ return self.subword_tokenizer.sp_model.IdToPiece(index)
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ if self.subword_tokenizer_type == "sentencepiece":
+ return self.subword_tokenizer.sp_model.decode(tokens)
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if os.path.isdir(save_directory):
+ if self.subword_tokenizer_type == "sentencepiece":
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["spm_file"]
+ )
+ else:
+ vocab_file = os.path.join(
+ save_directory,
+ (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+
+ if self.subword_tokenizer_type == "sentencepiece":
+ with open(vocab_file, "wb") as writer:
+ content_spiece_model = self.subword_tokenizer.sp_model.serialized_model_proto()
+ writer.write(content_spiece_model)
+ else:
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ index = 0
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+class MecabTokenizer:
+ """Runs basic tokenization with MeCab morphological parser."""
+
+ def __init__(
+ self,
+ do_lower_case=False,
+ never_split=None,
+ normalize_text=True,
+ mecab_dic: Optional[str] = "ipadic",
+ mecab_option: Optional[str] = None,
+ ):
+ """
+ Constructs a MecabTokenizer.
+
+ Args:
+            **do_lower_case**: (*optional*) boolean (default False)
+                Whether to lowercase the input.
+            **never_split**: (*optional*) list of str
+                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ **normalize_text**: (*optional*) boolean (default True)
+ Whether to apply unicode normalization to text before tokenization.
+ **mecab_dic**: (*optional*) string (default "ipadic")
+ Name of dictionary to be used for MeCab initialization. If you are using a system-installed dictionary,
+ set this option to `None` and modify *mecab_option*.
+ **mecab_option**: (*optional*) string
+ String passed to MeCab constructor.
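+
+        Example (a minimal usage sketch, assuming `fugashi` and `ipadic` are installed):
+
+        ```python
+        >>> word_tokenizer = MecabTokenizer(do_lower_case=False, mecab_dic="ipadic")
+        >>> words = word_tokenizer.tokenize("すもももももももものうち")
+        ```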
+ """
+ self.do_lower_case = do_lower_case
+ self.never_split = never_split if never_split is not None else []
+ self.normalize_text = normalize_text
+
+ try:
+ import fugashi
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "You need to install fugashi to use MecabTokenizer. "
+ "See https://pypi.org/project/fugashi/ for installation."
+ )
+
+ mecab_option = mecab_option or ""
+
+ if mecab_dic is not None:
+ if mecab_dic == "ipadic":
+ try:
+ import ipadic
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "The ipadic dictionary is not installed. "
+ "See https://github.com/polm/ipadic-py for installation."
+ )
+
+ dic_dir = ipadic.DICDIR
+
+ elif mecab_dic == "unidic_lite":
+ try:
+ import unidic_lite
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "The unidic_lite dictionary is not installed. "
+ "See https://github.com/polm/unidic-lite for installation."
+ )
+
+ dic_dir = unidic_lite.DICDIR
+
+ elif mecab_dic == "unidic":
+ try:
+ import unidic
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "The unidic dictionary is not installed. "
+ "See https://github.com/polm/unidic-py for installation."
+ )
+
+ dic_dir = unidic.DICDIR
+ if not os.path.isdir(dic_dir):
+ raise RuntimeError(
+ "The unidic dictionary itself is not found. "
+ "See https://github.com/polm/unidic-py for installation."
+ )
+
+ else:
+ raise ValueError("Invalid mecab_dic is specified.")
+
+ mecabrc = os.path.join(dic_dir, "mecabrc")
+ mecab_option = f'-d "{dic_dir}" -r "{mecabrc}" ' + mecab_option
+
+ self.mecab = fugashi.GenericTagger(mecab_option)
+
+ def tokenize(self, text, never_split=None, **kwargs):
+ """Tokenizes a piece of text."""
+ if self.normalize_text:
+ text = unicodedata.normalize("NFKC", text)
+
+ never_split = self.never_split + (never_split if never_split is not None else [])
+ tokens = []
+
+ for word in self.mecab(text):
+ token = word.surface
+
+ if self.do_lower_case and token not in never_split:
+ token = token.lower()
+
+ tokens.append(token)
+
+ return tokens
+
+
+class SudachiTokenizer:
+ """Runs basic tokenization with Sudachi morphological parser."""
+
+ def __init__(
+ self,
+ do_lower_case=False,
+ never_split=None,
+ normalize_text=True,
+ trim_whitespace=False,
+ sudachi_split_mode="A",
+ sudachi_config_path=None,
+ sudachi_resource_dir=None,
+ sudachi_dict_type="core",
+ sudachi_projection=None,
+ ):
+ """
+ Constructs a SudachiTokenizer.
+
+ Args:
+            **do_lower_case**: (*optional*) boolean (default False)
+                Whether to lowercase the input.
+            **never_split**: (*optional*) list of str
+                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ **normalize_text**: (*optional*) boolean (default True)
+ Whether to apply unicode normalization to text before tokenization.
+ **trim_whitespace**: (*optional*) boolean (default False)
+ Whether to trim all whitespace, tab, newline from tokens.
+            **sudachi_split_mode**: (*optional*) string
+                Split mode of Sudachi, choose from `["A", "B", "C"]`.
+            **sudachi_config_path**: (*optional*) string
+                Path to a SudachiPy configuration file.
+            **sudachi_resource_dir**: (*optional*) string
+                Path to a directory containing SudachiPy resource files.
+            **sudachi_dict_type**: (*optional*) string
+                Dictionary type of Sudachi, choose from `["small", "core", "full"]`.
+            **sudachi_projection**: (*optional*) string
+                Word projection mode of Sudachi, choose from `["surface", "normalized", "reading", "dictionary",
+                "dictionary_and_surface", "normalized_and_surface", "normalized_nouns"]`.
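+
+        Example (a minimal usage sketch, assuming `sudachipy` and a Sudachi `core` dictionary package are
+        installed):
+
+        ```python
+        >>> word_tokenizer = SudachiTokenizer(sudachi_split_mode="C", sudachi_dict_type="core")
+        >>> words = word_tokenizer.tokenize("外国人参政権")
+        ```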
+ """
+
+ self.do_lower_case = do_lower_case
+ self.never_split = never_split if never_split is not None else []
+ self.normalize_text = normalize_text
+ self.trim_whitespace = trim_whitespace
+
+ try:
+ from sudachipy import dictionary, tokenizer
+ except ImportError:
+ raise ImportError(
+ "You need to install sudachipy to use SudachiTokenizer. "
+ "See https://github.com/WorksApplications/SudachiPy for installation."
+ )
+
+ if sudachi_split_mode == "A":
+ self.split_mode = tokenizer.Tokenizer.SplitMode.A
+ elif sudachi_split_mode == "B":
+ self.split_mode = tokenizer.Tokenizer.SplitMode.B
+ elif sudachi_split_mode == "C":
+ self.split_mode = tokenizer.Tokenizer.SplitMode.C
+ else:
+ raise ValueError("Invalid sudachi_split_mode is specified.")
+
+ self.projection = sudachi_projection
+
+ sudachi_dictionary = dictionary.Dictionary(
+ config_path=sudachi_config_path, resource_dir=sudachi_resource_dir, dict=sudachi_dict_type
+ )
+ if is_sudachi_projection_available():
+ self.sudachi = sudachi_dictionary.create(self.split_mode, projection=self.projection)
+ elif self.projection is not None:
+ raise ImportError("You need to install sudachipy>=0.6.8 to specify `projection` field in sudachi_kwargs.")
+ else:
+ self.sudachi = sudachi_dictionary.create(self.split_mode)
+
+ def tokenize(self, text, never_split=None, **kwargs):
+ """Tokenizes a piece of text."""
+ if self.normalize_text:
+ text = unicodedata.normalize("NFKC", text)
+
+ never_split = self.never_split + (never_split if never_split is not None else [])
+ tokens = []
+
+ for word in self.sudachi.tokenize(text):
+ token = word.surface()
+
+ if self.do_lower_case and token not in never_split:
+ token = token.lower()
+
+ if self.trim_whitespace:
+ if token.strip() == "":
+ continue
+ else:
+ token = token.strip()
+
+ tokens.append(token)
+
+ return tokens
+
+
+class JumanppTokenizer:
+ """Runs basic tokenization with jumanpp morphological parser."""
+
+ def __init__(
+ self,
+ do_lower_case=False,
+ never_split=None,
+ normalize_text=True,
+ trim_whitespace=False,
+ ):
+ """
+ Constructs a JumanppTokenizer.
+
+ Args:
+            **do_lower_case**: (*optional*) boolean (default False)
+                Whether to lowercase the input.
+            **never_split**: (*optional*) list of str
+                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ **normalize_text**: (*optional*) boolean (default True)
+ Whether to apply unicode normalization to text before tokenization.
+ **trim_whitespace**: (*optional*) boolean (default False)
+ Whether to trim all whitespace, tab, newline from tokens.
+ """
+
+ self.do_lower_case = do_lower_case
+ self.never_split = never_split if never_split is not None else []
+ self.normalize_text = normalize_text
+ self.trim_whitespace = trim_whitespace
+
+ try:
+ import rhoknp
+ except ImportError:
+ raise ImportError(
+ "You need to install rhoknp to use JumanppTokenizer. "
+ "See https://github.com/ku-nlp/rhoknp for installation."
+ )
+
+ self.juman = rhoknp.Jumanpp()
+
+ def tokenize(self, text, never_split=None, **kwargs):
+ """Tokenizes a piece of text."""
+ if self.normalize_text:
+ text = unicodedata.normalize("NFKC", text)
+
+ text = text.strip()
+
+ never_split = self.never_split + (never_split if never_split is not None else [])
+ tokens = []
+
+ for mrph in self.juman.apply_to_sentence(text).morphemes:
+ token = mrph.text
+
+ if self.do_lower_case and token not in never_split:
+ token = token.lower()
+
+ if self.trim_whitespace:
+ if token.strip() == "":
+ continue
+ else:
+ token = token.strip()
+
+ tokens.append(token)
+
+ return tokens
+
+
+class CharacterTokenizer:
+ """Runs Character tokenization."""
+
+ def __init__(self, vocab, unk_token, normalize_text=True):
+ """
+ Constructs a CharacterTokenizer.
+
+ Args:
+ **vocab**:
+ Vocabulary object.
+ **unk_token**: str
+ A special symbol for out-of-vocabulary token.
+            **normalize_text**: (*optional*) boolean (default True)
+ Whether to apply unicode normalization to text before tokenization.
+ """
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.normalize_text = normalize_text
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into characters.
+
+        For example, `input = "apple"` will return as output `["a", "p", "p", "l", "e"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens.
+ This should have already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of characters.
+ """
+ if self.normalize_text:
+ text = unicodedata.normalize("NFKC", text)
+
+ output_tokens = []
+ for char in text:
+ if char not in self.vocab:
+ output_tokens.append(self.unk_token)
+ continue
+
+ output_tokens.append(char)
+
+ return output_tokens
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+            `do_basic_tokenize=True`.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+            never_split (`List[str]`, *optional*):
+                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
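+
+        Example (a toy-vocabulary sketch of the greedy longest-match-first behaviour):
+
+        ```python
+        >>> wordpiece = WordpieceTokenizer(vocab={"un": 0, "##aff": 1, "##able": 2}, unk_token="[UNK]")
+        >>> wordpiece.tokenize("unaffable")
+        ['un', '##aff', '##able']
+        ```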
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
+
+
+class SentencepieceTokenizer(object):
+ """
+ Runs sentencepiece tokenization. Based on transformers.models.albert.tokenization_albert.AlbertTokenizer.
+ """
+
+ def __init__(
+ self,
+ vocab,
+ unk_token,
+ do_lower_case=False,
+ remove_space=True,
+ keep_accents=True,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ ):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab)
+
+ def preprocess_text(self, inputs):
+ if self.remove_space:
+ outputs = " ".join(inputs.strip().split())
+ else:
+ outputs = inputs
+ outputs = outputs.replace("``", '"').replace("''", '"')
+
+ if not self.keep_accents:
+ outputs = unicodedata.normalize("NFKD", outputs)
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
+ if self.do_lower_case:
+ outputs = outputs.lower()
+
+ return outputs
+
+ def tokenize(self, text):
+ """
+ Tokenizes text by sentencepiece. Based on [SentencePiece](https://github.com/google/sentencepiece).
+ Tokenization needs the given vocabulary.
+
+ Args:
+ text: A string needs to be tokenized.
+
+ Returns:
+ A list of sentencepiece tokens.
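+
+        Example (a minimal usage sketch, assuming `sentencepiece` is installed and "spiece.model" is a trained
+        SentencePiece model file):
+
+        ```python
+        >>> subword_tokenizer = SentencepieceTokenizer(vocab="spiece.model", unk_token="[UNK]")
+        >>> pieces = subword_tokenizer.tokenize("こんにちは世界")
+        ```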
+ """
+ text = self.preprocess_text(text)
+ pieces = self.sp_model.encode(text, out_type=str)
+ new_pieces = []
+ for piece in pieces:
+ if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
+ if len(cur_pieces[0]) == 1:
+ cur_pieces = cur_pieces[1:]
+ else:
+ cur_pieces[0] = cur_pieces[0][1:]
+ cur_pieces.append(piece[-1])
+ new_pieces.extend(cur_pieces)
+ else:
+ new_pieces.append(piece)
+
+ return new_pieces
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..86d857b1e9a26d958b5ab44a0539bae1f182473d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__init__.py
@@ -0,0 +1,142 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_blenderbot": [
+ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "BlenderbotConfig",
+ "BlenderbotOnnxConfig",
+ ],
+ "tokenization_blenderbot": ["BlenderbotTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_blenderbot"] = [
+ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "BlenderbotForCausalLM",
+ "BlenderbotForConditionalGeneration",
+ "BlenderbotModel",
+ "BlenderbotPreTrainedModel",
+ ]
+
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_blenderbot"] = [
+ "TFBlenderbotForConditionalGeneration",
+ "TFBlenderbotModel",
+ "TFBlenderbotPreTrainedModel",
+ ]
+
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_blenderbot"] = [
+ "FlaxBlenderbotForConditionalGeneration",
+ "FlaxBlenderbotModel",
+ "FlaxBlenderbotPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_blenderbot import (
+ BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ BlenderbotConfig,
+ BlenderbotOnnxConfig,
+ )
+ from .tokenization_blenderbot import BlenderbotTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_blenderbot import (
+ BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ BlenderbotForCausalLM,
+ BlenderbotForConditionalGeneration,
+ BlenderbotModel,
+ BlenderbotPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_blenderbot import (
+ TFBlenderbotForConditionalGeneration,
+ TFBlenderbotModel,
+ TFBlenderbotPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_blenderbot import (
+ FlaxBlenderbotForConditionalGeneration,
+ FlaxBlenderbotModel,
+ FlaxBlenderbotPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
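+
+# Illustrative usage sketch (assuming the optional PyTorch backend is installed; the modeling
+# symbols are only registered in `_import_structure` above when `is_torch_available()` is True):
+#
+#     from transformers.models.blenderbot import BlenderbotConfig, BlenderbotForConditionalGeneration
+#     model = BlenderbotForConditionalGeneration(BlenderbotConfig())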
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70d0e03b54ca521a50e77d029c6129a3402c9dec
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/configuration_blenderbot.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/configuration_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e232a7232d0fa492c4dc5b6cd12bc7778b6dc263
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/configuration_blenderbot.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d41dee67081ac6c3fe97f9c0703cad3348b6e15d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_blenderbot.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d0a6a3ebd319b6abec17a34ef1520dedc2a0071
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_blenderbot.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_flax_blenderbot.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_flax_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a05da66f3940317a8c3063a1f5043985086d3db8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_flax_blenderbot.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_tf_blenderbot.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_tf_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb01fc58e0a05b983b059fbd6ef12bb6793e82b2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_tf_blenderbot.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a8e7b13543a6e7bbaba771f7d91079453105c7e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11f96cdfa6300bef93a73b006e539c4f0cfdf880
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot_fast.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/configuration_blenderbot.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/configuration_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f55a96bf62b71cd17dfd6e201746b82932db0f9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/configuration_blenderbot.py
@@ -0,0 +1,397 @@
+# coding=utf-8
+# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Blenderbot model configuration"""
+
+from collections import OrderedDict
+from typing import Any, Mapping, Optional
+
+from ... import PreTrainedTokenizer
+from ...configuration_utils import PretrainedConfig
+from ...file_utils import TensorType, is_torch_available
+from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
+from ...onnx.utils import compute_effective_axis_dimension
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/config.json",
+ # See all Blenderbot models at https://huggingface.co/models?filter=blenderbot
+}
+
+
+class BlenderbotConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate a
+ Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Blenderbot
+ [facebook/blenderbot-3B](https://huggingface.co/facebook/blenderbot-3B) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+        vocab_size (`int`, *optional*, defaults to 8008):
+            Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by
+            the `input_ids` passed when calling [`BlenderbotModel`] or [`TFBlenderbotModel`].
+        d_model (`int`, *optional*, defaults to 2560):
+            Dimensionality of the layers and the pooler layer.
+        encoder_layers (`int`, *optional*, defaults to 2):
+            Number of encoder layers.
+        decoder_layers (`int`, *optional*, defaults to 24):
+            Number of decoder layers.
+        encoder_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        decoder_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        decoder_ffn_dim (`int`, *optional*, defaults to 10240):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
+        encoder_ffn_dim (`int`, *optional*, defaults to 10240):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ max_position_embeddings (`int`, *optional*, defaults to 128):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+            for more details.
+        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+            for more details.
+ scale_embedding (`bool`, *optional*, defaults to `False`):
+            Scale embeddings by dividing by sqrt(d_model).
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models).
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+ `eos_token_id`.
+
+ Example:
+
+ ```python
+ >>> from transformers import BlenderbotConfig, BlenderbotModel
+
+ >>> # Initializing a Blenderbot facebook/blenderbot-3B style configuration
+ >>> configuration = BlenderbotConfig()
+
+ >>> # Initializing a model (with random weights) from the facebook/blenderbot-3B style configuration
+ >>> model = BlenderbotModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "blenderbot"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ def __init__(
+ self,
+ vocab_size=8008,
+ max_position_embeddings=128,
+ encoder_layers=2,
+ encoder_ffn_dim=10240,
+ encoder_attention_heads=32,
+ decoder_layers=24,
+ decoder_ffn_dim=10240,
+ decoder_attention_heads=32,
+ encoder_layerdrop=0.0,
+ decoder_layerdrop=0.0,
+ use_cache=True,
+ is_encoder_decoder=True,
+ activation_function="gelu",
+ d_model=2560,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ init_std=0.02,
+ decoder_start_token_id=1,
+ scale_embedding=False,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ encoder_no_repeat_ngram_size=3,
+ forced_eos_token_id=2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ decoder_start_token_id=decoder_start_token_id,
+ encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
+ forced_eos_token_id=forced_eos_token_id,
+ **kwargs,
+ )
+
+
+class BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ]
+ )
+ if self.use_past:
+ common_inputs["decoder_input_ids"] = {0: "batch"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
+ else:
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+ elif self.task == "causal-lm":
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ]
+ )
+ if self.use_past:
+ _, num_decoder_layers = self.num_layers
+ for i in range(num_decoder_layers):
+ common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
+ common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
+ else:
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
+ ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
+ ]
+ )
+
+ return common_inputs
+
+ @property
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_outputs = super().outputs
+ else:
+ common_outputs = super(OnnxConfigWithPast, self).outputs
+ if self.use_past:
+ num_encoder_layers, _ = self.num_layers
+ for i in range(num_encoder_layers):
+ common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
+ common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
+ return common_outputs
+
+ def _generate_dummy_inputs_for_default_and_seq2seq_lm(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, seq_length, is_pair, framework
+ )
+ # Generate decoder inputs
+ decoder_seq_length = seq_length if not self.use_past else 1
+ decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, decoder_seq_length, is_pair, framework
+ )
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
+
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+ batch, encoder_seq_length = common_inputs["input_ids"].shape
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
+ encoder_shape = (
+ batch,
+ num_encoder_attention_heads,
+ encoder_seq_length,
+ self._config.hidden_size // num_encoder_attention_heads,
+ )
+ decoder_past_length = decoder_seq_length
+ decoder_shape = (
+ batch,
+ num_decoder_attention_heads,
+ decoder_past_length,
+ self._config.hidden_size // num_decoder_attention_heads,
+ )
+ common_inputs["decoder_attention_mask"] = torch.cat(
+ [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
+ )
+ common_inputs["past_key_values"] = []
+ _, num_decoder_layers = self.num_layers
+
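+            # Each per-layer entry below holds four zero tensors: decoder key, decoder value,
+            # encoder key and encoder value, matching the `.decoder.*` / `.encoder.*` names
+            # produced by `fill_with_past_key_values_`.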
+ for _ in range(num_decoder_layers):
+ common_inputs["past_key_values"].append(
+ (
+ torch.zeros(decoder_shape),
+ torch.zeros(decoder_shape),
+ torch.zeros(encoder_shape),
+ torch.zeros(encoder_shape),
+ )
+ )
+ return common_inputs
+
+ def _generate_dummy_inputs_for_causal_lm(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, seq_length, is_pair, framework
+ )
+
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+ batch, seqlen = common_inputs["input_ids"].shape
+ past_key_values_length = seqlen
+ _, num_decoder_layers = self.num_layers
+ num_encoder_attention_heads, _ = self.num_attention_heads
+ past_shape = (
+ batch,
+ num_encoder_attention_heads,
+ past_key_values_length,
+ self._config.hidden_size // num_encoder_attention_heads,
+ )
+ mask_dtype = common_inputs["attention_mask"].dtype
+ common_inputs["attention_mask"] = torch.cat(
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
+ )
+ common_inputs["past_key_values"] = [
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_decoder_layers)
+ ]
+ return common_inputs
+
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._generate_dummy_inputs_for_sequence_classification_and_question_answering
+ def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ # Copied from OnnxConfig.generate_dummy_inputs
+ # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
+ batch_size = compute_effective_axis_dimension(
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
+ )
+
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
+ token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
+ seq_length = compute_effective_axis_dimension(
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
+ )
+
+ # Generate dummy inputs according to compute batch and sequence
+ dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
+ common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
+ return common_inputs
+
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.generate_dummy_inputs
+ def generate_dummy_inputs(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+ elif self.task == "causal-lm":
+ common_inputs = self._generate_dummy_inputs_for_causal_lm(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+ else:
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+ return common_inputs
+
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._flatten_past_key_values_
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
+ if self.task in ["default", "seq2seq-lm"]:
+ flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
+ else:
+ flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
+ flattened_output, name, idx, t
+ )
+
+ def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
+ if direction not in ["inputs", "outputs"]:
+ raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
+
+ name = "past_key_values" if direction == "inputs" else "present"
+ _, num_decoder_layers = self.num_layers
+
+ encoder_sequence = "past_encoder_sequence"
+ decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence"
+
+ for i in range(num_decoder_layers):
+ inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence}
+ inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence}
+ inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence}
+ inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence}
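+
+
+# Usage sketch (illustrative only, not part of the module above). Exporting dummy inputs for a
+# checkpoint such as "facebook/blenderbot-400M-distill" might look roughly like the following;
+# the checkpoint name and task are assumptions rather than anything this file prescribes.
+#
+#     from transformers import AutoConfig, AutoTokenizer
+#
+#     config = AutoConfig.from_pretrained("facebook/blenderbot-400M-distill")
+#     tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+#     onnx_config = BlenderbotOnnxConfig(config, task="default")
+#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
+#     print(sorted(onnx_config.inputs), sorted(dummy))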
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5919b94d42fb3555010cc9a454b2d31ecaa52ed
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Blenderbot checkpoint."""
+
+import argparse
+
+import torch
+
+from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+PATTERNS = [
+ ["attention", "attn"],
+ ["encoder_attention", "encoder_attn"],
+ ["q_lin", "q_proj"],
+ ["k_lin", "k_proj"],
+ ["v_lin", "v_proj"],
+ ["out_lin", "out_proj"],
+ ["norm_embeddings", "layernorm_embedding"],
+ ["position_embeddings", "embed_positions"],
+ ["embeddings", "embed_tokens"],
+ ["ffn.lin", "fc"],
+]
+
+
+def rename_state_dict_key(k):
+ if k == "embeddings.weight":
+ return "shared.weight"
+
+ for parlai_name, hf_name in PATTERNS:
+ k = k.replace(parlai_name, hf_name)
+
+ if k.startswith("encoder"):
+ k = k.replace(".attn", ".self_attn")
+ k = k.replace("norm1", "self_attn_layer_norm")
+ k = k.replace("norm2", "final_layer_norm")
+ elif k.startswith("decoder"):
+ k = k.replace("norm1", "self_attn_layer_norm")
+ k = k.replace("norm2", "encoder_attn_layer_norm")
+ k = k.replace("norm3", "final_layer_norm")
+ return k
+
+
+def rename_layernorm_keys(sd):
+ keys = [
+ "model.encoder.layernorm_embedding.weight",
+ "model.encoder.layernorm_embedding.bias",
+ "model.decoder.layernorm_embedding.weight",
+ "model.decoder.layernorm_embedding.bias",
+ ]
+ for k in keys:
+ v = sd.pop(k)
+ new_k = k.replace("layernorm_embedding", "layer_norm")
+ assert new_k not in sd
+ sd[new_k] = v
+
+
+IGNORE_KEYS = ["START"]
+
+
+@torch.no_grad()
+def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
+ """
+    Copy/paste/tweak model's weights to our Blenderbot structure.
+ """
+ model = torch.load(checkpoint_path, map_location="cpu")
+ sd = model["model"]
+ cfg = BlenderbotConfig.from_json_file(config_json_path)
+ m = BlenderbotForConditionalGeneration(cfg)
+ valid_keys = m.model.state_dict().keys()
+ failures = []
+ mapping = {}
+ for k, v in sd.items():
+ if k in IGNORE_KEYS:
+ continue
+
+ new_k = rename_state_dict_key(k)
+ if new_k not in valid_keys:
+ failures.append([k, new_k])
+ else:
+ mapping[new_k] = v
+ if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
+ rename_layernorm_keys(sd)
+ m.model.load_state_dict(mapping, strict=True)
+ m.half()
+ m.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
+ parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
+ parser.add_argument(
+ "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
+ )
+ args = parser.parse_args()
+ convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
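+
+# Example invocation (illustrative; the file names are placeholders taken from the argument
+# defaults and help text above, not verified paths):
+#
+#     python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
+#         --src_path blenderbot-model.bin \
+#         --save_dir hf_blenderbot \
+#         --hf_config_json blenderbot-3b-config.json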
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_blenderbot.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..28b81387c13e628d100217e331fe8d8e49821083
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_blenderbot.py
@@ -0,0 +1,1600 @@
+# coding=utf-8
+# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Blenderbot model."""
+
+
+import copy
+import math
+import os
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ..blenderbot_small import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel
+from .configuration_blenderbot import BlenderbotConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "BlenderbotConfig"
+_CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill"
+
+
+BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "facebook/blenderbot-3B",
+ # See all Blenderbot models at https://huggingface.co/models?filter=blenderbot
+]
+
+
+# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
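+
+# Worked example (comment only, with made-up token ids): for input_ids [[5, 6, 2]],
+# pad_token_id 0 and decoder_start_token_id 1, the result is [[1, 5, 6]]; any -100 label
+# placeholders that get shifted in are replaced by pad_token_id so they can be embedded safely.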
+
+
+class BlenderbotLearnedPositionalEmbedding(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int):
+ super().__init__(num_embeddings, embedding_dim)
+
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ bsz, seq_len = input_ids_shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ )
+ return super().forward(positions)
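+        # Starting the position range at past_key_values_length keeps incremental decoding
+        # aligned: with a cache of length 4 and a single new token, the looked-up position
+        # is 4 rather than 0.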
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Blenderbot
+class BlenderbotAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[BlenderbotConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
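+        # Shape summary (illustrative, hypothetical sizes): with bsz=2, tgt_len=5, embed_dim=1280
+        # and num_heads=32 (head_dim=40), q/k/v are viewed as (bsz * num_heads, seq_len, head_dim)
+        # = (64, 5, 40) for the two bmm calls, and attn_output is reshaped back to
+        # (bsz, tgt_len, embed_dim) = (2, 5, 1280) before the output projection.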
+
+
+BLENDERBOT_ATTENTION_CLASSES = {"eager": BlenderbotAttention}
+
+
+# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Blenderbot, MBART->BLENDERBOT
+class BlenderbotEncoderLayer(nn.Module):
+ def __init__(self, config: BlenderbotConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ config=config,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ layer_head_mask: torch.Tensor,
+ output_attentions: bool = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Blenderbot, MBART->BLENDERBOT
+class BlenderbotDecoderLayer(nn.Module):
+ def __init__(self, config: BlenderbotConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ is_causal=True,
+ config=config,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ config=config,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+                `(decoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class BlenderbotPreTrainedModel(PreTrainedModel):
+ config_class = BlenderbotConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ @property
+ def dummy_inputs(self):
+ pad_token = self.config.pad_token_id
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
+ dummy_inputs = {
+ "attention_mask": input_ids.ne(pad_token),
+ "input_ids": input_ids,
+ "decoder_input_ids": input_ids,
+ }
+ return dummy_inputs
+
+
+BLENDERBOT_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`BlenderbotConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLENDERBOT_GENERATION_EXAMPLE = r"""
+ Conversation example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BlenderbotForConditionalGeneration
+
+ >>> mname = "facebook/blenderbot-400M-distill"
+ >>> model = BlenderbotForConditionalGeneration.from_pretrained(mname)
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
+ >>> print("Human: ", UTTERANCE)
+ Human: My friends are cool but they eat too many carbs.
+
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
+ >>> reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
+ Bot: That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?
+
+ >>> REPLY = "I'm not sure"
+ >>> print("Human: ", REPLY)
+ Human: I'm not sure
+
+ >>> NEXT_UTTERANCE = (
+    ...     "My friends are cool but they eat too many carbs.</s> <s>That's unfortunate. "
+    ...     "Are they trying to lose weight or are they just trying to be healthier?</s> "
+    ...     "<s> I'm not sure."
+ ... )
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")
+ >>> next_reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
+ Bot: I see. Well, it's good that they're trying to change their eating habits.
+ ```
+"""
+
+BLENDERBOT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+            Tuple consisting of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
+            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class BlenderbotEncoder(BlenderbotPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`BlenderbotEncoderLayer`].
+
+ Args:
+ config: BlenderbotConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ if embed_tokens is not None:
+ self.embed_tokens = embed_tokens
+ else:
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
+
+ self.embed_positions = BlenderbotLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ embed_dim,
+ )
+ self.layers = nn.ModuleList([BlenderbotEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ head_mask=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(input_shape)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # add final layer norm
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class BlenderbotDecoder(BlenderbotPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotDecoderLayer`]
+
+ Args:
+ config: BlenderbotConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+
+ if embed_tokens is not None:
+ self.embed_tokens = embed_tokens
+ else:
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
+
+ self.embed_positions = BlenderbotLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ )
+ self.layers = nn.ModuleList([BlenderbotDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+                Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0,
+                1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input_shape, past_key_values_length)
+
+ hidden_states = inputs_embeds + positions
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+                        f" {attn_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
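+                # layer_outputs is (hidden_states, self_attn, cross_attn, present_key_value) when
+                # attentions are returned and (hidden_states, present_key_value) otherwise, hence
+                # the 3-vs-1 index above.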
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add final layer norm
+ hidden_states = self.layer_norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Blenderbot Model outputting raw hidden-states without any specific head on top.",
+ BLENDERBOT_START_DOCSTRING,
+)
+class BlenderbotModel(BlenderbotPreTrainedModel):
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]
+
+ def __init__(self, config: BlenderbotConfig):
+ super().__init__(config)
+
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
+
+ self.encoder = BlenderbotEncoder(config, self.shared)
+ self.decoder = BlenderbotDecoder(config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
+ if pretrained_model_name_or_path == "facebook/blenderbot-90M":
+ warnings.warn(
+ "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
+ " checkpoint `facebook/small_blenderbot-90M` with"
+ " `BlenderbotSmallModel.from_pretrained('facebook/small_blenderbot-90M')` instead.",
+ FutureWarning,
+ )
+ return BlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path)
+
+ return super(BlenderbotModel, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, value):
+ self.shared = value
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BlenderbotModel
+
+ >>> model = BlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ >>> list(last_hidden_states.shape)
+ [1, 6, 1280]
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The Blenderbot Model with a language modeling head. Can be used for summarization.", BLENDERBOT_START_DOCSTRING
+)
+class BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel):
+ base_model_prefix = "model"
+ _keys_to_ignore_on_load_missing = ["final_logits_bias"]
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config: BlenderbotConfig):
+ super().__init__(config)
+ self.model = BlenderbotModel(config)
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
+ if pretrained_model_name_or_path == "facebook/blenderbot-90M":
+ warnings.warn(
+ "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
+ " checkpoint `facebook/small_blenderbot-90M` with"
+ " `BlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')` instead.",
+ FutureWarning,
+ )
+ return BlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path)
+
+ return super(BlenderbotForConditionalGeneration, cls).from_pretrained(
+ pretrained_model_name_or_path, *model_args, **kwargs
+ )
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
+ return new_embeddings
+
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
+ old_num_tokens = self.final_logits_bias.shape[-1]
+ if new_num_tokens <= old_num_tokens:
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
+ else:
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
+ self.register_buffer("final_logits_bias", new_bias)
+
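+ # Editorial note: a minimal sketch (not part of the library) of how `resize_token_embeddings`
+ # keeps `final_logits_bias` in sync with the embedding matrix; the checkpoint name is illustrative.
+ # >>> model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ # >>> _ = model.resize_token_embeddings(model.config.vocab_size + 2)
+ # >>> model.final_logits_bias.shape[-1] == model.get_input_embeddings().num_embeddings
+ # True
+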
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if decoder_input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
+
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
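+ # Editorial note: a small worked example (hypothetical shapes) of the prefix trimming above.
+ # If `decoder_input_ids` has shape (1, 5) and the cache already covers past_length == 4 positions,
+ # then remove_prefix_length == 4 and only the newest token, decoder_input_ids[:, 4:], is re-fed to the model.
+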
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+ + layer_past[2:],
+ )
+ return reordered_past
+
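+ # Editorial note: a minimal usage sketch for dialogue generation with the class above
+ # (illustrative only, not part of the library; variable names are hypothetical).
+ # >>> from transformers import AutoTokenizer, BlenderbotForConditionalGeneration
+ # >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+ # >>> model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ # >>> inputs = tokenizer("Hello, how are you?", return_tensors="pt")
+ # >>> reply_ids = model.generate(**inputs, max_new_tokens=40)
+ # >>> tokenizer.batch_decode(reply_ids, skip_special_tokens=True)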
+
+# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Blenderbot
+class BlenderbotDecoderWrapper(BlenderbotPreTrainedModel):
+ """
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
+ used in combination with the [`EncoderDecoderModel`] framework.
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.decoder = BlenderbotDecoder(config)
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->Blenderbot, facebook/bart-base->facebook/blenderbot-400M-distill
+class BlenderbotForCausalLM(BlenderbotPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ config = copy.deepcopy(config)
+ config.is_decoder = True
+ config.is_encoder_decoder = False
+ super().__init__(config)
+ self.model = BlenderbotDecoderWrapper(config)
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model.decoder = decoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ if the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BlenderbotForCausalLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> model = BlenderbotForCausalLM.from_pretrained("facebook/blenderbot-400M-distill", add_cross_attention=False)
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+ >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
+ >>> list(logits.shape) == expected_shape
+ True
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ logits = self.lm_head(outputs[0])
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
+ ):
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_ids.shape)
+
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+ # on the first generation step, `past_key_values` is None and the full prompt is passed through
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_flax_blenderbot.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_flax_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..61239335be3b639eb65520aa51f97986938633c9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_flax_blenderbot.py
@@ -0,0 +1,1505 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Flax Blenderbot model."""
+
+import math
+import random
+from functools import partial
+from typing import Callable, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+from jax.random import PRNGKey
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
+ FlaxCausalLMOutputWithCrossAttentions,
+ FlaxSeq2SeqLMOutput,
+ FlaxSeq2SeqModelOutput,
+)
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_call_sample_docstring,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_blenderbot import BlenderbotConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "BlenderbotConfig"
+_CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill"
+
+
+BLENDERBOT_START_DOCSTRING = r"""
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.).
+
+ This model is also a Flax Linen
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`BlenderbotConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLENDERBOT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+BLENDERBOT_ENCODE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+BLENDERBOT_DECODE_INPUTS_DOCSTRING = r"""
+ Args:
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state`, of shape `(batch_size, sequence_length, hidden_size)`, is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
+def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = jnp.zeros_like(input_ids)
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
+
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
+ return shifted_input_ids
+
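+ # Editorial note: a small worked example of `shift_tokens_right` (token ids are hypothetical).
+ # >>> import jax.numpy as jnp
+ # >>> shift_tokens_right(jnp.array([[5, 6, 2]]), pad_token_id=0, decoder_start_token_id=1)
+ # Array([[1, 5, 6]], dtype=int32)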
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->Blenderbot
+class FlaxBlenderbotAttention(nn.Module):
+ config: BlenderbotConfig
+ embed_dim: int
+ num_heads: int
+ dropout: float = 0.0
+ causal: bool = False
+ bias: bool = True
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self) -> None:
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+
+ dense = partial(
+ nn.Dense,
+ self.embed_dim,
+ use_bias=self.bias,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
+ self.out_proj = dense()
+
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
+
+ if self.causal:
+ self.causal_mask = make_causal_mask(
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
+ )
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
+
+ @nn.compact
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+ states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
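+ # Editorial note: the cache variables above hold full-length buffers of shape
+ # (batch, max_length, num_heads, head_dim); each decoding step writes its new key/value slice at
+ # `cache_index` and advances the index, so only already-generated positions are attended to.
+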
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ key_value_states: Optional[jnp.ndarray] = None,
+ attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ batch_size = hidden_states.shape[0]
+
+ # get query proj
+ query_states = self.q_proj(hidden_states)
+ # get key, value proj
+ if is_cross_attention:
+ # cross_attentions
+ key_states = self.k_proj(key_value_states)
+ value_states = self.v_proj(key_value_states)
+ else:
+ # self_attention
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # handle the cache and prepare the causal attention mask
+ if self.causal:
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
+ if self.has_variable("cache", "cached_key"):
+ mask_shift = self.variables["cache"]["cache_index"]
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_mask = lax.dynamic_slice(
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
+ )
+ else:
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
+
+ # combine masks if needed
+ if attention_mask is not None and self.causal:
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
+ attention_mask = combine_masks(attention_mask, causal_mask)
+ elif self.causal:
+ attention_mask = causal_mask
+ elif attention_mask is not None:
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
+ key_states, value_states, query_states, attention_mask
+ )
+
+ # Convert the boolean attention mask to an attention bias.
+ if attention_mask is not None:
+ # attention mask in the form of attention bias
+ attention_bias = lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
+ )
+ else:
+ attention_bias = None
+
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights
+
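+ # Editorial note: the boolean attention mask above is converted to an additive bias (0 for visible
+ # positions, the most negative finite value of `dtype` for masked ones) before being handed to
+ # `dot_product_attention_weights`, which adds the bias to the raw attention logits.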
+
+# Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartEncoderLayer with MBart->Blenderbot
+class FlaxBlenderbotEncoderLayer(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self) -> None:
+ self.embed_dim = self.config.d_model
+ self.self_attn = FlaxBlenderbotAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.encoder_attention_heads,
+ dropout=self.config.attention_dropout,
+ dtype=self.dtype,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+ self.activation_fn = ACT2FN[self.config.activation_function]
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
+ self.fc1 = nn.Dense(
+ self.config.encoder_ffn_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.fc2 = nn.Dense(
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
+ )
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ attention_mask: jnp.ndarray,
+ output_attentions: bool = True,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
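+ # Editorial note: like the MBart layer it is copied from, this encoder layer is "pre-LayerNorm":
+ # LayerNorm is applied before self-attention and before the feed-forward block, rather than after
+ # the residual addition as in the original BART encoder layer.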
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->Blenderbot
+class FlaxBlenderbotEncoderLayerCollection(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxBlenderbotEncoderLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.encoder_layers)
+ ]
+ self.layerdrop = self.config.encoder_layerdrop
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ for encoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
+ layer_outputs = (None, None)
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ deterministic,
+ )
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = (hidden_states, all_hidden_states, all_attentions)
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+# Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartDecoderLayer with MBart->Blenderbot
+class FlaxBlenderbotDecoderLayer(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self) -> None:
+ self.embed_dim = self.config.d_model
+ self.self_attn = FlaxBlenderbotAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.decoder_attention_heads,
+ dropout=self.config.attention_dropout,
+ causal=True,
+ dtype=self.dtype,
+ )
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+ self.activation_fn = ACT2FN[self.config.activation_function]
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
+
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.encoder_attn = FlaxBlenderbotAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.decoder_attention_heads,
+ dropout=self.config.attention_dropout,
+ dtype=self.dtype,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.fc1 = nn.Dense(
+ self.config.decoder_ffn_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.fc2 = nn.Dense(
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
+ )
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ attention_mask: jnp.ndarray,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ output_attentions: bool = True,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
+ )
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+ hidden_states, cross_attn_weights = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ )
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ return outputs
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->Blenderbot
+class FlaxBlenderbotDecoderLayerCollection(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxBlenderbotDecoderLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.decoder_layers)
+ ]
+ self.layerdrop = self.config.decoder_layerdrop
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if not deterministic and (dropout_probability < self.layerdrop):
+ layer_outputs = (None, None, None)
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class FlaxBlenderbotEncoder(nn.Module):
+ config: BlenderbotConfig
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+
+ embed_dim = self.config.d_model
+ self.padding_idx = self.config.pad_token_id
+ self.max_source_positions = self.config.max_position_embeddings
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
+
+ self.embed_positions = nn.Embed(
+ self.config.max_position_embeddings,
+ embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.layers = FlaxBlenderbotEncoderLayerCollection(self.config, self.dtype)
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ input_shape = input_ids.shape
+ input_ids = input_ids.reshape(-1, input_shape[-1])
+
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(position_ids)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+
+ outputs = self.layers(
+ hidden_states,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ last_hidden_states = outputs[0]
+ last_hidden_states = self.layer_norm(last_hidden_states)
+
+ # update the last element in `hidden_states` after applying `layernorm` above
+ hidden_states = None
+ if output_hidden_states:
+ hidden_states = outputs[1]
+ hidden_states = hidden_states[:-1] + (last_hidden_states,)
+
+ if not return_dict:
+ outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=last_hidden_states,
+ hidden_states=hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class FlaxBlenderbotDecoder(nn.Module):
+ config: BlenderbotConfig
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+
+ embed_dim = self.config.d_model
+ self.padding_idx = self.config.pad_token_id
+ self.max_target_positions = self.config.max_position_embeddings
+ self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
+
+ self.embed_positions = nn.Embed(
+ self.config.max_position_embeddings,
+ embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.layers = FlaxBlenderbotDecoderLayerCollection(self.config, self.dtype)
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ input_shape = input_ids.shape
+ input_ids = input_ids.reshape(-1, input_shape[-1])
+
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ # embed positions
+ positions = self.embed_positions(position_ids)
+
+ hidden_states = inputs_embeds + positions
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+
+ outputs = self.layers(
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_states = outputs[0]
+ last_hidden_states = self.layer_norm(last_hidden_states)
+
+ # update the last element in `hidden_states` after applying `layernorm` above
+ hidden_states = None
+ if output_hidden_states:
+ hidden_states = outputs[1]
+ hidden_states = hidden_states[:-1] + (last_hidden_states,)
+
+ if not return_dict:
+ outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=last_hidden_states,
+ hidden_states=hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->Blenderbot
+class FlaxBlenderbotModule(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.shared = nn.Embed(
+ self.config.vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ dtype=self.dtype,
+ )
+
+ self.encoder = FlaxBlenderbotEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
+ self.decoder = FlaxBlenderbotDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return FlaxSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+class FlaxBlenderbotPreTrainedModel(FlaxPreTrainedModel):
+ config_class = BlenderbotConfig
+ base_model_prefix: str = "model"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: BlenderbotConfig,
+ input_shape: Tuple[int] = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ # make sure the initialization pass works for modules that expect the last input token to be `eos_token_id`
+ input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
+ attention_mask = jnp.ones_like(input_ids)
+ decoder_input_ids = input_ids
+ decoder_attention_mask = jnp.ones_like(input_ids)
+
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+ decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ )["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ def init_cache(self, batch_size, max_length, encoder_outputs):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
+ `attentions`). `last_hidden_state`, of shape `(batch_size, sequence_length, hidden_size)`,
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
+ cross-attention of the decoder.
+ """
+ # init input variables to retrieve cache
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
+ )
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ **kwargs,
+ )
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0),
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ init_cache=True,
+ method=_decoder_forward, # we only need to call the decoder to init the cache
+ )
+ return unfreeze(init_variables["cache"])
+
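+ # Editorial note: a minimal sketch (illustrative, not part of the library) of combining `init_cache`
+ # with `decode` for incremental decoding; `model`, `inputs` and `decoder_input_ids` are hypothetical.
+ # >>> encoder_outputs = model.encode(**inputs)
+ # >>> past = model.init_cache(inputs.input_ids.shape[0], max_length=32, encoder_outputs=encoder_outputs)
+ # >>> out = model.decode(decoder_input_ids, encoder_outputs, past_key_values=past)
+ # >>> past = out.past_key_values # reuse the updated cache for the next token
+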
+ @add_start_docstrings(BLENDERBOT_ENCODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotConfig)
+ def encode(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
+
+ >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
+ >>> encoder_outputs = model.encode(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+ if position_ids is None:
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
+ encode_module = module._get_encoder_module()
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ method=_encoder_forward,
+ )
+
+ @add_start_docstrings(BLENDERBOT_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(
+ output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotConfig
+ )
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import jax.numpy as jnp
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
+
+ >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> last_decoder_hidden_states = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ if decoder_position_ids is None:
+ if past_key_values is not None:
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
+
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If past_key_values are passed, the cache is already initialized and a private flag init_cache has to
+ # be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can
+ # be changed by the FlaxBlenderbotAttention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ **kwargs,
+ )
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past = outputs
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past = outputs
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
+ def __call__(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ decoder_input_ids: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # prepare encoder inputs
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+ if position_ids is None:
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ # prepare decoder inputs
+ if decoder_input_ids is None:
+ decoder_input_ids = shift_tokens_right(
+ input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
+ )
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+ if decoder_position_ids is None:
+ batch_size, sequence_length = decoder_input_ids.shape
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ )
+
+
+@add_start_docstrings(
+ "The bare MBart Model transformer outputting raw hidden-states without any specific head on top.",
+ BLENDERBOT_START_DOCSTRING,
+)
+class FlaxBlenderbotModel(FlaxBlenderbotPreTrainedModel):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ module_class = FlaxBlenderbotModule
+
+
+append_call_sample_docstring(FlaxBlenderbotModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->Blenderbot
+class FlaxBlenderbotForConditionalGenerationModule(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.model = FlaxBlenderbotModule(config=self.config, dtype=self.dtype)
+ self.lm_head = nn.Dense(
+ self.model.shared.num_embeddings,
+ use_bias=False,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
+
+ def _get_encoder_module(self):
+ return self.model.encoder
+
+ def _get_decoder_module(self):
+ return self.model.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ position_ids=position_ids,
+ decoder_position_ids=decoder_position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ hidden_states = outputs[0]
+
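+ # When word embeddings are tied, reuse the shared token embedding matrix (transposed) as the LM head kernel.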
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ lm_logits = self.lm_head(hidden_states)
+
+ lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return output
+
+ return FlaxSeq2SeqLMOutput(
+ logits=lm_logits,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The Blenderbot Model with a language modeling head. Can be used for summarization.", BLENDERBOT_START_DOCSTRING
+)
+class FlaxBlenderbotForConditionalGeneration(FlaxBlenderbotPreTrainedModel):
+ module_class = FlaxBlenderbotForConditionalGenerationModule
+ dtype: jnp.dtype = jnp.float32
+
+ @add_start_docstrings(BLENDERBOT_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotConfig)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import jax.numpy as jnp
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
+
+ >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ if decoder_position_ids is None:
+ if past_key_values is not None:
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
+
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If past_key_values are passed, the cache is already initialized and a private flag init_cache has to
+ # be passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can
+ # be changed by the FlaxBlenderbotAttention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ outputs = decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ **kwargs,
+ )
+ hidden_states = outputs[0]
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = module.model.variables["params"]["shared"]["embedding"]
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ lm_logits = module.lm_head(hidden_states)
+
+ lm_logits += module.final_logits_bias
+ return lm_logits, outputs
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ if past_key_values is None:
+ lm_logits, decoder_outputs = outputs
+ else:
+ (lm_logits, decoder_outputs), past = outputs
+
+ if return_dict:
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
+ logits=lm_logits,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ )
+ else:
+ outputs = (lm_logits,) + decoder_outputs[1:]
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
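+ # Rough generation flow (illustrative summary): `prepare_inputs_for_generation` builds the decoder cache
+ # via `init_cache` together with a static attention mask, each decoding step then calls
+ # `decode(..., past_key_values=...)`, and `update_inputs_for_generation` carries the updated cache forward
+ # while advancing `decoder_position_ids`.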
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ max_length,
+ attention_mask: Optional[jax.Array] = None,
+ decoder_attention_mask: Optional[jax.Array] = None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # initializing the cache
+ batch_size, seq_length = decoder_input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+ # But since the decoder uses a causal mask, those positions are masked anyways.
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if decoder_attention_mask is not None:
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
+ else:
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+ return {
+ "past_key_values": past_key_values,
+ "encoder_outputs": encoder_outputs,
+ "encoder_attention_mask": attention_mask,
+ "decoder_attention_mask": extended_attention_mask,
+ "decoder_position_ids": position_ids,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
+ return model_kwargs
+
+
+FLAX_BLENDERBOT_CONDITIONAL_GENERATION_DOCSTRING = r"""
+ Returns:
+
+ Conversation example:
+
+ ```py
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
+
+ >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer([UTTERANCE], max_length=1024, return_tensors="np")
+
+ >>> # Generate Reply
+ >>> reply_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5, early_stopping=True).sequences
+ >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in reply_ids])
+ ```
+"""
+
+overwrite_call_docstring(
+ FlaxBlenderbotForConditionalGeneration,
+ BLENDERBOT_INPUTS_DOCSTRING + FLAX_BLENDERBOT_CONDITIONAL_GENERATION_DOCSTRING,
+)
+append_replace_return_docstrings(
+ FlaxBlenderbotForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
+)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_tf_blenderbot.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_tf_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccb07d20ecf97d6d5f205669f38534c5953a946f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_tf_blenderbot.py
@@ -0,0 +1,1556 @@
+# coding=utf-8
+# Copyright 2021 The Facebook, Inc and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 Blenderbot model."""
+
+
+from __future__ import annotations
+
+import os
+import random
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPastAndCrossAttentions,
+ TFSeq2SeqLMOutput,
+ TFSeq2SeqModelOutput,
+)
+
+# Public API
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFPreTrainedModel,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_blenderbot import BlenderbotConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill"
+_CONFIG_FOR_DOC = "BlenderbotConfig"
+
+
+LARGE_NEGATIVE = -1e8
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
+def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
+ start_tokens = tf.fill(
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
+ )
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids = tf.where(
+ shifted_input_ids == -100,
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
+ shifted_input_ids,
+ )
+
+ # "Verify that `labels` has only positive values and -100"
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
+
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
+ with tf.control_dependencies([assert_gte0]):
+ shifted_input_ids = tf.identity(shifted_input_ids)
+
+ return shifted_input_ids
+
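+# Illustrative sketch of `shift_tokens_right` (the values are made up):
+#
+#     labels  = tf.constant([[42, 7, -100, -100]])
+#     shifted = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1)
+#     # -> [[1, 42, 7, 0]]: the decoder start token is prepended, the last label is dropped,
+#     #    and the remaining -100 is replaced by `pad_token_id`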
+
+# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
+def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
+ """
+ Make causal mask used for uni-directional (decoder) self-attention.
+ """
+ bsz = input_ids_shape[0]
+ tgt_len = input_ids_shape[1]
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
+ mask_cond = tf.range(shape_list(mask)[-1])
+
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
+
+ if past_key_values_length > 0:
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
+
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
+
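+# Illustrative sketch: for `tgt_len=3` and `past_key_values_length=0`, the core of the causal mask is
+#
+#     [[0, -1e8, -1e8],
+#      [0,    0, -1e8],
+#      [0,    0,    0]]
+#
+# i.e. position i may only attend to positions <= i; the result is broadcast to
+# shape (bsz, 1, tgt_len, tgt_len + past_key_values_length).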
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
+
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
+
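+# Illustrative sketch: a padding mask `[[1, 1, 0]]` becomes the additive bias `[[0.0, 0.0, -1e8]]`,
+# replicated over `tgt_len` query positions, so padded source tokens are effectively ignored.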
+
+class TFBlenderbotLearnedPositionalEmbedding(keras.layers.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
+ super().__init__(num_embeddings, embedding_dim, **kwargs)
+
+ def call(
+ self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None
+ ):
+ """Input is expected to be of size [bsz x seqlen]."""
+ if position_ids is None:
+ seq_len = input_shape[1]
+ position_ids = tf.range(seq_len, delta=1, name="range")
+ position_ids += past_key_values_length
+
+ return super().call(tf.cast(position_ids, dtype=tf.int32))
+
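+# Illustrative sketch: when decoding a single new token with `past_key_values_length=4`, the default
+# `position_ids` become `tf.range(1) + 4 == [4]`, so the embedding for absolute position 4 is looked up
+# rather than the one for position 0.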
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Blenderbot
+class TFBlenderbotAttention(keras.layers.Layer):
+ """Multi-headed attention from "Attention Is All You Need"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.embed_dim = embed_dim
+
+ self.num_heads = num_heads
+ self.dropout = keras.layers.Dropout(dropout)
+ self.head_dim = embed_dim // num_heads
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
+
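+ # `_shape` reshapes (bsz, seq_len, embed_dim) into the per-head layout (bsz, num_heads, seq_len, head_dim).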
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ key_value_states: tf.Tensor | None = None,
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
+ attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
+ key_states = tf.reshape(key_states, proj_shape)
+ value_states = tf.reshape(value_states, proj_shape)
+
+ src_len = shape_list(key_states)[1]
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_weights),
+ [bsz * self.num_heads, tgt_len, src_len],
+ message=(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {shape_list(attn_weights)}"
+ ),
+ )
+
+ if attention_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attention_mask),
+ [bsz, 1, tgt_len, src_len],
+ message=(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {shape_list(attention_mask)}"
+ ),
+ )
+
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_weights = stable_softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
+ )
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_probs = self.dropout(attn_weights, training=training)
+ attn_output = tf.matmul(attn_probs, value_states)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output),
+ [bsz * self.num_heads, tgt_len, self.head_dim],
+ message=(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {shape_list(attn_output)}"
+ ),
+ )
+
+ attn_output = tf.transpose(
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
+ )
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
+
+ return attn_output, attn_weights, past_key_value
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
+
+# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartEncoderLayer with MBart->Blenderbot
+class TFBlenderbotEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: BlenderbotConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFBlenderbotAttention(
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
+ )
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ layer_head_mask: tf.Tensor,
+ training: Optional[bool] = False,
+ ):
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
+ attention_mask (`tf.Tensor`): attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ *(encoder_attention_heads,)*
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, self_attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(hidden_states),
+ shape_list(residual),
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
+ )
+
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return hidden_states, self_attn_weights
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer with MBart->Blenderbot
+class TFBlenderbotDecoderLayer(keras.layers.Layer):
+ def __init__(self, config: BlenderbotConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFBlenderbotAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="self_attn",
+ is_decoder=True,
+ )
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.encoder_attn = TFBlenderbotAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="encoder_attn",
+ is_decoder=True,
+ )
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ encoder_hidden_states: tf.Tensor | None = None,
+ encoder_attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
+ past_key_value: Tuple[tf.Tensor] | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
+ attention_mask (`tf.Tensor`): attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`tf.Tensor`):
+ cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ *(decoder_attention_heads,)*
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
+ *(decoder_attention_heads,)*
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return (
+ hidden_states,
+ self_attn_weights,
+ cross_attn_weights,
+ present_key_value,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "encoder_attn", None) is not None:
+ with tf.name_scope(self.encoder_attn.name):
+ self.encoder_attn.build(None)
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+class TFBlenderbotPreTrainedModel(TFPreTrainedModel):
+ config_class = BlenderbotConfig
+ base_model_prefix = "model"
+
+
+BLENDERBOT_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Args:
+ config ([`BlenderbotConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLENDERBOT_GENERATION_EXAMPLE = r"""
+ Conversation example:
+
+ ```py
+ >>> from transformers import AutoTokenizer, TFBlenderbotForConditionalGeneration
+
+ >>> mname = "facebook/blenderbot-400M-distill"
+ >>> model = TFBlenderbotForConditionalGeneration.from_pretrained(mname)
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
+ >>> print("Human: ", UTTERANCE)
+
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")
+ >>> reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
+
+ >>> REPLY = "I'm not sure"
+ >>> print("Human: ", REPLY)
+ >>> NEXT_UTTERANCE = (
+ ... "My friends are cool but they eat too many carbs. That's unfortunate. "
+ ... "Are they trying to lose weight or are they just trying to be healthier? "
+ ... " I'm not sure."
+ ... )
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")
+ >>> next_reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
+ ```
+"""
+
+BLENDERBOT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Will be created by default, ignoring pad tokens. Setting this manually is not recommended for most use cases.
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tf.FloatTensor`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder, of shape
+ `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`). Set to `False` during training, `True` during generation.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@keras_serializable
+class TFBlenderbotEncoder(keras.layers.Layer):
+ config_class = BlenderbotConfig
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`TFBlenderbotEncoderLayer`].
+
+ Args:
+ config: BlenderbotConfig
+ """
+
+ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.layerdrop = config.encoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+
+ self.embed_tokens = embed_tokens
+ self.embed_positions = TFBlenderbotLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ name="embed_positions",
+ )
+ self.layers = [TFBlenderbotEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ inputs_embeds=None,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ """
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
+ in the config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
+ will be used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
+ in eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(input_shape)
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # check attention mask and invert
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _expand_mask(attention_mask)
+ else:
+ attention_mask = None
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(head_mask)[0],
+ len(self.layers),
+ message=(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(head_mask)[0]}."
+ ),
+ )
+
+ # encoder layers
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if training and (dropout_probability < self.layerdrop): # skip the layer
+ continue
+
+ hidden_states, attn = encoder_layer(
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ )
+
+ if output_attentions:
+ all_attentions += (attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFBlenderbotDecoder(keras.layers.Layer):
+ config_class = BlenderbotConfig
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBlenderbotDecoderLayer`]
+
+ Args:
+ config: BlenderbotConfig
+ embed_tokens: output embedding
+ """
+
+ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.padding_idx = config.pad_token_id
+ self.embed_tokens = embed_tokens
+ self.layerdrop = config.decoder_layerdrop
+ self.embed_positions = TFBlenderbotLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ name="embed_positions",
+ )
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+ self.layers = [TFBlenderbotDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+
+ self.dropout = keras.layers.Dropout(config.dropout)
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ inputs_embeds=None,
+ attention_mask=None,
+ position_ids=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
+ decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
+ in the config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
+ will be used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
+ in eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
+
+ # embed positions
+ if position_ids is None:
+ positions = self.embed_positions(input_shape, past_key_values_length)
+ else:
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ hidden_states = inputs_embeds
+
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ if input_shape[-1] > 1:
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
+ else:
+ combined_attention_mask = _expand_mask(
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
+ )
+
+ if attention_mask is not None:
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
+
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
+
+ hidden_states = hidden_states + positions
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
+ present_key_values = () if use_cache else None
+
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
+ if attn_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attn_mask)[0],
+ len(self.layers),
+ message=(
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(attn_mask)[0]}."
+ ),
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ dropout_probability = random.uniform(0, 1)
+
+ if training and (dropout_probability < self.layerdrop):
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
+ hidden_states,
+ attention_mask=combined_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ past_key_value=past_key_value,
+ )
+
+ if use_cache:
+ present_key_values += (present_key_value,)
+
+ if output_attentions:
+ all_self_attns += (layer_self_attn,)
+
+ if encoder_hidden_states is not None:
+ all_cross_attns += (layer_cross_attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
+ else:
+ return TFBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=present_key_values,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
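+
+ # Illustrative sketch (not part of the original module): a minimal check of the `past_key_values`
+ # contract documented in the decoder's `call` above, on a tiny randomly initialised config.
+ # All sizes below are arbitrary assumptions; the helper is never called at import time.
+ def _example_decoder_cache_reuse():
+     config = BlenderbotConfig(
+         vocab_size=32,
+         d_model=16,
+         decoder_layers=2,
+         decoder_attention_heads=2,
+         decoder_ffn_dim=32,
+         max_position_embeddings=32,
+     )
+     decoder = TFBlenderbotDecoder(config, embed_tokens=keras.layers.Embedding(config.vocab_size, config.d_model))
+
+     ids = tf.constant([[4, 5, 6, 7]])
+     full = decoder(ids, use_cache=True, return_dict=True)
+
+     # Run the first three tokens, then feed only the newest token together with the cache.
+     prefix = decoder(ids[:, :-1], use_cache=True, return_dict=True)
+     step = decoder(ids[:, -1:], past_key_values=prefix.past_key_values, use_cache=True, return_dict=True)
+
+     # With dropout inactive at inference, the cached step matches the last position of the full pass.
+     return tf.reduce_max(tf.abs(full.last_hidden_state[:, -1:] - step.last_hidden_state))
+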
+
+@keras_serializable
+class TFBlenderbotMainLayer(keras.layers.Layer):
+ config_class = BlenderbotConfig
+
+ def __init__(self, config: BlenderbotConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.shared = keras.layers.Embedding(
+ input_dim=config.vocab_size,
+ output_dim=config.d_model,
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
+ name="model.shared",
+ )
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
+ self.shared.load_weight_prefix = "model.shared"
+
+ self.encoder = TFBlenderbotEncoder(config, self.shared, name="encoder")
+ self.decoder = TFBlenderbotDecoder(config, self.shared, name="decoder")
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ decoder_position_ids=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values=None,
+ inputs_embeds=None,
+ decoder_inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ **kwargs,
+ ):
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
+ encoder_outputs = TFBaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
+ encoder_outputs = encoder_outputs.to_tuple()
+
+ decoder_outputs = self.decoder(
+ decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ # The shared/tied weights expect to be in the model base namespace
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
+ # the current one.
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
+ self.shared.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
+
+
+@add_start_docstrings(
+ "The bare BLENDERBOT Model outputting raw hidden-states without any specific head on top.",
+ BLENDERBOT_START_DOCSTRING,
+)
+class TFBlenderbotModel(TFBlenderbotPreTrainedModel):
+ def __init__(self, config: BlenderbotConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.model = TFBlenderbotMainLayer(config, name="model")
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
+ if pretrained_model_name_or_path == "facebook/blenderbot-90M":
+ from ..blenderbot_small import TFBlenderbotSmallModel
+
+ warnings.warn(
+ "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
+ " checkpoint `facebook/small_blenderbot-90M` with"
+ " `TFBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')`"
+ " instead.",
+ FutureWarning,
+ )
+ return TFBlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path)
+
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSeq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ decoder_position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ decoder_head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values: List[tf.Tensor] | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ decoder_inputs_embeds: tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=output.last_hidden_state,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+
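+
+ # Illustrative sketch (not part of the original module): a download-free smoke test of the
+ # encoder/decoder wiring with a tiny randomly initialised config (all sizes are arbitrary
+ # assumptions). Note that the bare model does not derive `decoder_input_ids` from `input_ids`,
+ # so they have to be passed explicitly. The helper is never called at import time.
+ def _example_tiny_blenderbot_forward():
+     config = BlenderbotConfig(
+         vocab_size=64,
+         d_model=16,
+         encoder_layers=2,
+         decoder_layers=2,
+         encoder_attention_heads=2,
+         decoder_attention_heads=2,
+         encoder_ffn_dim=32,
+         decoder_ffn_dim=32,
+         max_position_embeddings=32,
+     )
+     model = TFBlenderbotModel(config)
+     input_ids = tf.constant([[5, 6, 7, 2]])
+     decoder_input_ids = tf.constant([[1, 5, 6]])
+     outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True)
+     # last_hidden_state has shape (batch_size, target_length, d_model) -> (1, 3, 16) here.
+     return outputs.last_hidden_state.shape
+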
+
+# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
+class BiasLayer(keras.layers.Layer):
+ """
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
+ so all weights have to be registered in a layer.
+ """
+
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
+ super().__init__(name=name, **kwargs)
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
+
+ def call(self, x):
+ return x + self.bias
+
+
+@add_start_docstrings(
+ "The BLENDERBOT Model with a language modeling head. Can be used for summarization.",
+ BLENDERBOT_START_DOCSTRING,
+)
+class TFBlenderbotForConditionalGeneration(TFBlenderbotPreTrainedModel, TFCausalLanguageModelingLoss):
+ _keys_to_ignore_on_load_unexpected = [
+ r"model.encoder.embed_tokens.weight",
+ r"model.decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.model = TFBlenderbotMainLayer(config, name="model")
+ self.use_cache = config.use_cache
+ # final_logits_bias is registered as a buffer in pytorch, so not trainable for the sake of consistency.
+ self.bias_layer = BiasLayer(
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
+ )
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_output_embeddings(self):
+ return self.get_input_embeddings()
+
+ def set_output_embeddings(self, value):
+ self.set_input_embeddings(value)
+
+ def get_bias(self):
+ return {"final_logits_bias": self.bias_layer.bias}
+
+ def set_bias(self, value):
+ # Replaces the existing layers containing bias for correct (de)serialization.
+ vocab_size = value["final_logits_bias"].shape[-1]
+ self.bias_layer = BiasLayer(
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
+ )
+ self.bias_layer.bias.assign(value["final_logits_bias"])
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
+ if pretrained_model_name_or_path == "facebook/blenderbot-90M":
+ from ..blenderbot_small import TFBlenderbotSmallForConditionalGeneration
+
+ warnings.warn(
+ "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
+ " checkpoint `facebook/small_blenderbot-90M` with"
+ " `TFBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')`"
+ " instead.",
+ FutureWarning,
+ )
+ return TFBlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path)
+
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
+ def call(
+ self,
+ input_ids: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ decoder_position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ decoder_head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values: List[tf.Tensor] | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ decoder_inputs_embeds: tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ """
+ if labels is not None:
+ labels = tf.where(
+ labels == self.config.pad_token_id,
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
+ labels,
+ )
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
+ lm_logits = self.bias_layer(lm_logits)
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+ return TFSeq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values, # index 1 of decoder outputs
+ decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of decoder outputs
+ decoder_attentions=outputs.decoder_attentions, # index 3 of decoder outputs
+ cross_attentions=outputs.cross_attentions, # index 4 of decoder outputs
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
+ encoder_hidden_states=outputs.encoder_hidden_states, # index 1 of encoder outputs
+ encoder_attentions=outputs.encoder_attentions, # index 2 of encoder outputs
+ )
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqLMOutput(
+ logits=output.logits,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ decoder_attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ if decoder_attention_mask is not None: # xla
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
+ elif past_key_values is not None: # no xla + past_key_values
+ decoder_position_ids = past_key_values[0][0].shape[2]
+ else: # no xla + no past_key_values
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "decoder_attention_mask": decoder_attention_mask,
+ "decoder_position_ids": decoder_position_ids,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+ if getattr(self, "bias_layer", None) is not None:
+ with tf.name_scope(self.bias_layer.name):
+ self.bias_layer.build(None)
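+
+
+ # Illustrative sketch (not part of the original module): end-to-end response generation with a
+ # pretrained checkpoint. "facebook/blenderbot-400M-distill" is assumed to ship TensorFlow weights;
+ # if only PyTorch weights are hosted, `from_pt=True` can be passed to convert them on the fly.
+ # The helper is never called at import time.
+ def _example_generate_reply():
+     from transformers import BlenderbotTokenizer
+
+     name = "facebook/blenderbot-400M-distill"
+     tokenizer = BlenderbotTokenizer.from_pretrained(name)
+     model = TFBlenderbotForConditionalGeneration.from_pretrained(name)
+
+     inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
+     reply_ids = model.generate(**inputs, max_new_tokens=40)
+     return tokenizer.batch_decode(reply_ids, skip_special_tokens=True)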
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..29386c1233adf0e1968566c799b3843bb3ffa8cf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot.py
@@ -0,0 +1,439 @@
+# coding=utf-8
+# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization class for Blenderbot."""
+
+import json
+import os
+from functools import lru_cache
+from typing import List, Optional, Tuple
+
+import regex as re
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+ "tokenizer_config_file": "tokenizer_config.json",
+}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
+ "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
+ "tokenizer_config_file": {
+ "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
+
+
+@lru_cache()
+# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
+def bytes_to_unicode():
+ """
+ Returns a mapping between utf-8 bytes and unicode strings. We specifically avoid mapping to whitespace/control
+ characters that the bpe code barfs on.
+
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
+ tables between utf-8 bytes and unicode strings.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
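+ # Illustrative sketch (not part of the original module): the byte-to-unicode table covers all
+ # 256 byte values and is reversible, which is what makes byte-level BPE lossless. The helper is
+ # never called at import time.
+ def _example_bytes_to_unicode_roundtrip():
+     table = bytes_to_unicode()
+     reverse = {v: k for k, v in table.items()}
+     assert len(table) == 256 and len(reverse) == 256
+     encoded = "".join(table[b] for b in "héllo".encode("utf-8"))
+     assert bytes(reverse[c] for c in encoded).decode("utf-8") == "héllo"
+     return encoded
+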
+
+# Copied from transformers.models.roberta.tokenization_roberta.get_pairs
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word.
+
+ Word is represented as tuple of symbols (symbols being variable-length strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
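+ # Illustrative sketch (not part of the original module): the symbol pairs that the BPE merge
+ # loop in `bpe` below starts from for the word "hello". Never called at import time.
+ def _example_get_pairs():
+     assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
+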
+
+class BlenderbotTokenizer(PreTrainedTokenizer):
+ """
+ Constructs a Blenderbot tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
+
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
+
+ ```python
+ >>> from transformers import BlenderbotTokenizer
+
+ >>> tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
+ >>> tokenizer.add_prefix_space = False
+ >>> tokenizer("Hello world")["input_ids"]
+ [47, 921, 86, 1085, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [6950, 1085, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+
+
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
+
+
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
+ other word. (The Blenderbot tokenizer detects the beginning of words by the preceding space.)
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.__init__ with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ errors="replace",
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ add_prefix_space=False,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+
+ # The mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+
+ # these special tokens are not part of the vocab.json, let's add them in the correct order
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+ super().__init__(
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ **kwargs,
+ )
+
+ @property
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def vocab_size(self):
+ return len(self.encoder)
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_vocab with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def get_vocab(self):
+ vocab = dict(self.encoder).copy()
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.bpe with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._tokenize with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+ return bpe_tokens
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_token_to_id with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_id_to_token with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.convert_tokens_to_string with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.save_vocabulary with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_special_tokens_mask with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.create_token_type_ids_from_sequences with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.prepare_for_tokenization with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
+ text = " " + text
+ return (text, kwargs)
+
+ def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A Blenderbot sequence has the following format:
+ - single sequence: ` X </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added
+ token_ids_1 (`List[int]`, *optional*):
+ Will be ignored
+ Returns:
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ return token_ids_0 + [self.eos_token_id]
+
+ @property
+ def default_chat_template(self):
+ """
+ A very simple chat template that just adds whitespace between messages.
+ """
+ logger.warning_once(
+ "\nNo chat template is defined for this tokenizer - using the default template "
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+ )
+ return (
+ "{% for message in messages %}"
+ "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
+ "{{ message['content'] }}"
+ "{% if not loop.last %}{{ ' ' }}{% endif %}"
+ "{% endfor %}"
+ "{{ eos_token }}"
+ )
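+
+
+ # Illustrative sketch (not part of the original module): the single-sequence format and the
+ # default chat template in use. Loading "facebook/blenderbot-3B" (the checkpoint referenced in
+ # this file) downloads its vocabulary files. Never called at import time.
+ def _example_blenderbot_tokenizer_usage():
+     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
+
+     # `__call__` appends a single EOS token, matching `build_inputs_with_special_tokens` above.
+     input_ids = tokenizer("Hello world")["input_ids"]
+     assert input_ids[-1] == tokenizer.eos_token_id
+
+     # The default chat template joins messages with spaces and appends the EOS token.
+     messages = [
+         {"role": "user", "content": "Hi!"},
+         {"role": "assistant", "content": "Hello, how can I help?"},
+     ]
+     return tokenizer.apply_chat_template(messages, tokenize=False)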
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot_fast.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..6245025b503d539b6f1043429f0f67486df391a4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot_fast.py
@@ -0,0 +1,321 @@
+# coding=utf-8
+# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Tokenization class for Blenderbot."""
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import pre_tokenizers, processors
+
+from ...tokenization_utils_base import AddedToken, BatchEncoding
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_blenderbot import BlenderbotTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+ "tokenizer_config_file": "tokenizer_config.json",
+}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
+ "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
+ "tokenizer_config_file": {
+ "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
+
+
+class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
+ tokenizer, using byte-level Byte-Pair-Encoding.
+
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
+
+ ```python
+ >>> from transformers import BlenderbotTokenizerFast
+
+ >>> tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
+ >>> tokenizer("Hello world")["input_ids"]
+ [6950, 1085, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [6950, 1085, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+
+
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
+
+
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
+ other word. (The Blenderbot tokenizer detects the beginning of words by the preceding space.)
+ trim_offsets (`bool`, *optional*, defaults to `True`):
+ Whether the post processing step should trim offsets to avoid including whitespaces.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = BlenderbotTokenizer
+
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.__init__ with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+ errors="replace",
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ add_prefix_space=False,
+ trim_offsets=True,
+ **kwargs,
+ ):
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+ super().__init__(
+ vocab_file,
+ merges_file,
+ tokenizer_file=tokenizer_file,
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ trim_offsets=trim_offsets,
+ **kwargs,
+ )
+
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
+ pre_tok_state["add_prefix_space"] = add_prefix_space
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
+
+ self.add_prefix_space = add_prefix_space
+
+ tokenizer_component = "post_processor"
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
+ if tokenizer_component_instance:
+ state = json.loads(tokenizer_component_instance.__getstate__())
+
+ # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
+ if "sep" in state:
+ state["sep"] = tuple(state["sep"])
+ if "cls" in state:
+ state["cls"] = tuple(state["cls"])
+
+ changes_to_apply = False
+
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ state["add_prefix_space"] = add_prefix_space
+ changes_to_apply = True
+
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
+ state["trim_offsets"] = trim_offsets
+ changes_to_apply = True
+
+ if changes_to_apply:
+ component_class = getattr(processors, state.pop("type"))
+ new_value = component_class(**state)
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
+
+ @property
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def mask_token(self) -> str:
+ """
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
+ having been set.
+
+ The Blenderbot tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token greedily
+ comprises the space before the *<mask>*.
+ """
+ if self._mask_token is None:
+ if self.verbose:
+ logger.error("Using mask_token, but it is not set yet.")
+ return None
+ return str(self._mask_token)
+
+ @mask_token.setter
+ def mask_token(self, value):
+ """
+ Overriding the default behavior of the mask token to have it eat the space before it.
+
+ This is needed to preserve backward compatibility with all the previously used models based on Roberta.
+ """
+ # The mask token behaves like a normal word, i.e. it includes the space before it
+ # So we set lstrip to True
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
+ self._mask_token = value
+
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast._batch_encode_plus with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._batch_encode_plus(*args, **kwargs)
+
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast._encode_plus with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._encode_plus(*args, **kwargs)
+
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.save_vocabulary with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
+
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.create_token_type_ids_from_sequences with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A Blenderbot sequence has the following format:
+ - single sequence: ` X </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added
+ token_ids_1 (`List[int]`, *optional*):
+ Will be ignored
+ Returns:
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ return token_ids_0 + [self.eos_token_id]
+
+ @property
+ # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template
+ def default_chat_template(self):
+ """
+ A very simple chat template that just adds whitespace between messages.
+ """
+ logger.warning_once(
+ "\nNo chat template is defined for this tokenizer - using the default template "
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+ )
+ return (
+ "{% for message in messages %}"
+ "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
+ "{{ message['content'] }}"
+ "{% if not loop.last %}{{ ' ' }}{% endif %}"
+ "{% endfor %}"
+ "{{ eos_token }}"
+ )
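+
+
+ # Illustrative sketch (not part of the original module): pretokenized input requires
+ # `add_prefix_space=True`, as enforced by `_encode_plus`/`_batch_encode_plus` above. Loading
+ # "facebook/blenderbot-3B" (the checkpoint referenced in this file) downloads its tokenizer files.
+ # Never called at import time.
+ def _example_pretokenized_input():
+     tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B", add_prefix_space=True)
+     return tokenizer(["Hello", "world"], is_split_into_words=True)["input_ids"]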
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba7b9b0dde58a07d91c44692e4a38720b4618a8c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/configuration_blenderbot_small.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/configuration_blenderbot_small.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79bb86052a4350746692a29e76ec193dfee75968
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/configuration_blenderbot_small.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8315f14d40487132862a0c94ea53e21c49b7fe81
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_blenderbot_small.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8aa3d6420a25a591efe62007e8a6716f52e9c995
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_flax_blenderbot_small.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c936fcff25c110af959c6cea2bc307a51d4eec16
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/modeling_tf_blenderbot_small.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b129a3ac4f0404d1f878908cdac8e33d8e6fdb2c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e16ed3aecf1d5137f50af9f22e9d20dbe23b44b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/blenderbot_small/__pycache__/tokenization_blenderbot_small_fast.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f26481abb4f24554d96c32d45fd97031badaa8d8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..513588cfc61e2e5f5e4a8d7cd43818e856a4d655
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/convert_convnextv2_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/convert_convnextv2_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..994f0101019650b4bf916c56b45cd29ffc7076b0
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/convert_convnextv2_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f8737fdb0b78821b1d4db7b03d7278a4d892370
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63149ac1daa5f75d2a4d6a0b7f5f987a10fed788
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a2756eb9d1c269e08446f9328120738196349d0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__init__.py
@@ -0,0 +1,166 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_distilbert": [
+ "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "DistilBertConfig",
+ "DistilBertOnnxConfig",
+ ],
+ "tokenization_distilbert": ["DistilBertTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_distilbert"] = [
+ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "DistilBertForMaskedLM",
+ "DistilBertForMultipleChoice",
+ "DistilBertForQuestionAnswering",
+ "DistilBertForSequenceClassification",
+ "DistilBertForTokenClassification",
+ "DistilBertModel",
+ "DistilBertPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_distilbert"] = [
+ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFDistilBertForMaskedLM",
+ "TFDistilBertForMultipleChoice",
+ "TFDistilBertForQuestionAnswering",
+ "TFDistilBertForSequenceClassification",
+ "TFDistilBertForTokenClassification",
+ "TFDistilBertMainLayer",
+ "TFDistilBertModel",
+ "TFDistilBertPreTrainedModel",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_distilbert"] = [
+ "FlaxDistilBertForMaskedLM",
+ "FlaxDistilBertForMultipleChoice",
+ "FlaxDistilBertForQuestionAnswering",
+ "FlaxDistilBertForSequenceClassification",
+ "FlaxDistilBertForTokenClassification",
+ "FlaxDistilBertModel",
+ "FlaxDistilBertPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_distilbert import (
+ DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ DistilBertConfig,
+ DistilBertOnnxConfig,
+ )
+ from .tokenization_distilbert import DistilBertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_distilbert_fast import DistilBertTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_distilbert import (
+ DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ DistilBertForMaskedLM,
+ DistilBertForMultipleChoice,
+ DistilBertForQuestionAnswering,
+ DistilBertForSequenceClassification,
+ DistilBertForTokenClassification,
+ DistilBertModel,
+ DistilBertPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_distilbert import (
+ TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFDistilBertForMaskedLM,
+ TFDistilBertForMultipleChoice,
+ TFDistilBertForQuestionAnswering,
+ TFDistilBertForSequenceClassification,
+ TFDistilBertForTokenClassification,
+ TFDistilBertMainLayer,
+ TFDistilBertModel,
+ TFDistilBertPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_distilbert import (
+ FlaxDistilBertForMaskedLM,
+ FlaxDistilBertForMultipleChoice,
+ FlaxDistilBertForQuestionAnswering,
+ FlaxDistilBertForSequenceClassification,
+ FlaxDistilBertForTokenClassification,
+ FlaxDistilBertModel,
+ FlaxDistilBertPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
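For reference, a minimal sketch (not part of the package) of how the lazy import structure above behaves at runtime: importing the subpackage is cheap, and the framework-specific submodules are only loaded when one of the exported names is first accessed. Assumes only that `transformers` itself is importable.

import importlib

distilbert = importlib.import_module("transformers.models.distilbert")  # returns the _LazyModule
config_cls = distilbert.DistilBertConfig  # first attribute access triggers the real submodule import
print(config_cls(n_layers=3).n_layers)    # 3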
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8823f2f6d5ab74146c3a92edf7ea721124447056
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b20697665caf35d2a2c6ec026f8687601fca1e0
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/__pycache__/tokenization_distilbert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..97b5b7c869064b6fa9cae4879012aec3b030360f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py
@@ -0,0 +1,155 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" DistilBERT model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
+ "distilbert-base-uncased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
+ ),
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
+ "distilbert-base-cased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
+ ),
+ "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
+ "distilbert-base-multilingual-cased": (
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
+ ),
+ "distilbert-base-uncased-finetuned-sst-2-english": (
+ "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
+ ),
+}
+
+
+class DistilBertConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`DistilBertModel`] or a [`TFDistilBertModel`]. It
+ is used to instantiate a DistilBERT model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the DistilBERT
+ [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the DistilBERT model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`DistilBertModel`] or [`TFDistilBertModel`].
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
+ Whether to use sinusoidal positional embeddings.
+ n_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer encoder.
+ n_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ dim (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ hidden_dim (`int`, *optional*, defaults to 3072):
+ The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ qa_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probabilities used in the question answering model [`DistilBertForQuestionAnswering`].
+ seq_classif_dropout (`float`, *optional*, defaults to 0.2):
+ The dropout probabilities used in the sequence classification and the multiple choice model
+ [`DistilBertForSequenceClassification`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import DistilBertConfig, DistilBertModel
+
+ >>> # Initializing a DistilBERT configuration
+ >>> configuration = DistilBertConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = DistilBertModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "distilbert"
+ attribute_map = {
+ "hidden_size": "dim",
+ "num_attention_heads": "n_heads",
+ "num_hidden_layers": "n_layers",
+ }
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ max_position_embeddings=512,
+ sinusoidal_pos_embds=False,
+ n_layers=6,
+ n_heads=12,
+ dim=768,
+ hidden_dim=4 * 768,
+ dropout=0.1,
+ attention_dropout=0.1,
+ activation="gelu",
+ initializer_range=0.02,
+ qa_dropout=0.1,
+ seq_classif_dropout=0.2,
+ pad_token_id=0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.sinusoidal_pos_embds = sinusoidal_pos_embds
+ self.n_layers = n_layers
+ self.n_heads = n_heads
+ self.dim = dim
+ self.hidden_dim = hidden_dim
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation = activation
+ self.initializer_range = initializer_range
+ self.qa_dropout = qa_dropout
+ self.seq_classif_dropout = seq_classif_dropout
+ super().__init__(**kwargs, pad_token_id=pad_token_id)
+
+
+class DistilBertOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ]
+ )
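A minimal sketch (not part of the file above) of the `attribute_map` aliasing: the BERT-style names resolve to DistilBERT's own fields, so both spellings read the same values. Assumes only that `transformers` is installed.

from transformers import DistilBertConfig

config = DistilBertConfig(dim=512, n_heads=8, n_layers=4)
# The aliases defined in attribute_map point at the native attributes.
assert config.hidden_size == config.dim == 512
assert config.num_attention_heads == config.n_heads == 8
assert config.num_hidden_layers == config.n_layers == 4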
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..023b4dc13ade1c67b32f7b1e51c7f3293abc3002
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py
@@ -0,0 +1,1392 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
+ part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
+"""
+
+
+import math
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import get_activation
+from ...configuration_utils import PretrainedConfig
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_outputs import (
+ BaseModelOutput,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_distilbert import DistilBertConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "distilbert-base-uncased",
+ "distilbert-base-uncased-distilled-squad",
+ "distilbert-base-cased",
+ "distilbert-base-cased-distilled-squad",
+ "distilbert-base-german-cased",
+ "distilbert-base-multilingual-cased",
+ "distilbert-base-uncased-finetuned-sst-2-english",
+ # See all DistilBERT models at https://huggingface.co/models?filter=distilbert
+]
+
+
+# UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
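To make the unpadding bookkeeping above concrete, here is a small self-contained sketch (not part of the model file) that reproduces the same computation on a toy right-padded mask; it needs only `torch`.

import torch
import torch.nn.functional as F

mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # 1 = real token, 0 = padding
seqlens_in_batch = mask.sum(dim=-1, dtype=torch.int32)              # tensor([3, 2])
indices = torch.nonzero(mask.flatten(), as_tuple=False).flatten()   # flat positions of real tokens
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
print(indices.tolist())               # [0, 1, 2, 4, 5]
print(cu_seqlens.tolist())            # [0, 3, 5]  cumulative lengths consumed by flash_attn_varlen_func
print(seqlens_in_batch.max().item())  # 3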
+
+
+def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(out, modifier_rank=0):
+ if torch.distributed.get_rank() == 0:
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
+ else:
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
+
+
+def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
+ out.requires_grad = False
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
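A small sketch (not part of the model file) of what the sinusoidal filling above produces for a tiny table: position 0 alternates sin(0)=0 in the even columns and cos(0)=1 in the odd columns. Requires only `numpy` and `torch`.

import numpy as np
import torch

n_pos, dim = 4, 6
out = torch.empty(n_pos, dim)
position_enc = np.array(
    [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))  # even columns: sine
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))  # odd columns: cosine
print(out.shape)        # torch.Size([4, 6])
print(out[0].tolist())  # [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]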
+
+
+class Embeddings(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
+ if config.sinusoidal_pos_embds:
+ create_sinusoidal_embeddings(
+ n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight
+ )
+
+ self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
+ self.dropout = nn.Dropout(config.dropout)
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None) -> torch.Tensor:
+ """
+ Parameters:
+ input_ids (torch.Tensor):
+ torch.tensor(bs, max_seq_length) The token ids to embed.
+ input_embeds (*optional*, torch.Tensor):
+ The pre-computed word embeddings. Can only be passed if the input ids are `None`.
+
+
+ Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
+ embeddings)
+ """
+ if input_ids is not None:
+ input_embeds = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
+
+ seq_length = input_embeds.size(1)
+
+ # Use the position_ids buffer registered in the constructor: it helps when tracing
+ # the model without passing position_ids and avoids issues similar to issue #5664
+ if hasattr(self, "position_ids"):
+ position_ids = self.position_ids[:, :seq_length]
+ else:
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
+ position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
+
+ position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
+
+ embeddings = input_embeds + position_embeddings # (bs, max_seq_length, dim)
+ embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
+ embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
+ return embeddings
+
+
+class MultiHeadSelfAttention(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.config = config
+
+ self.n_heads = config.n_heads
+ self.dim = config.dim
+ self.dropout = nn.Dropout(p=config.attention_dropout)
+ self.is_causal = False
+
+ # The hidden size must be evenly divisible by the number of attention heads
+ if self.dim % self.n_heads != 0:
+ raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")
+
+ self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+ self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+ self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+ self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
+
+ self.pruned_heads: Set[int] = set()
+ self.attention_head_size = self.dim // self.n_heads
+
+ def prune_heads(self, heads: List[int]):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.n_heads, self.attention_head_size, self.pruned_heads
+ )
+ # Prune linear layers
+ self.q_lin = prune_linear_layer(self.q_lin, index)
+ self.k_lin = prune_linear_layer(self.k_lin, index)
+ self.v_lin = prune_linear_layer(self.v_lin, index)
+ self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
+ # Update hyper params
+ self.n_heads = self.n_heads - len(heads)
+ self.dim = self.attention_head_size * self.n_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ mask: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ """
+ Parameters:
+ query: torch.tensor(bs, seq_length, dim)
+ key: torch.tensor(bs, seq_length, dim)
+ value: torch.tensor(bs, seq_length, dim)
+ mask: torch.tensor(bs, seq_length)
+
+ Returns:
+ weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights. Optional: only if `output_attentions=True`
+ context: torch.tensor(bs, seq_length, dim) Contextualized layer.
+ """
+ bs, q_length, dim = query.size()
+ k_length = key.size(1)
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ # assert key.size() == value.size()
+
+ dim_per_head = self.dim // self.n_heads
+
+ mask_reshp = (bs, 1, 1, k_length)
+
+ def shape(x: torch.Tensor) -> torch.Tensor:
+ """separate heads"""
+ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
+
+ def unshape(x: torch.Tensor) -> torch.Tensor:
+ """group heads"""
+ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
+
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
+
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
+ scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
+ mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)
+ scores = scores.masked_fill(
+ mask, torch.tensor(torch.finfo(scores.dtype).min)
+ ) # (bs, n_heads, q_length, k_length)
+
+ weights = nn.functional.softmax(scores, dim=-1) # (bs, n_heads, q_length, k_length)
+ weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ weights = weights * head_mask
+
+ context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
+ context = unshape(context) # (bs, q_length, dim)
+ context = self.out_lin(context) # (bs, q_length, dim)
+
+ if output_attentions:
+ return (context, weights)
+ else:
+ return (context,)
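The padding-mask broadcast used above is easy to check in isolation; a minimal sketch (separate from the class) using `torch` only:

import torch

bs, n_heads, q_len, k_len = 2, 3, 4, 4
scores = torch.zeros(bs, n_heads, q_len, k_len)       # stand-in for q @ k^T
mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])     # (bs, k_len); 0 marks padding
pad = (mask == 0).view(bs, 1, 1, k_len).expand_as(scores)
scores = scores.masked_fill(pad, torch.finfo(scores.dtype).min)
print(torch.softmax(scores, dim=-1)[0, 0, 0])
# tensor([0.3333, 0.3333, 0.3333, 0.0000])  -> padded keys receive (near-)zero attention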
+
+
+class DistilBertFlashAttention2(MultiHeadSelfAttention):
+ """
+ DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention`, as the weights of the module
+ stay untouched. The only required change is in the forward pass, which needs to correctly call the public
+ API of flash attention and deal with padding tokens in case the input contains any.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ mask: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ """
+ Parameters:
+ query: torch.tensor(bs, seq_length, dim)
+ key: torch.tensor(bs, seq_length, dim)
+ value: torch.tensor(bs, seq_length, dim)
+ mask: torch.tensor(bs, seq_length)
+
+ Returns:
+ weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights. Optional: only if `output_attentions=True`
+ context: torch.tensor(bs, seq_length, dim) Contextualized layer.
+ """
+ batch_size, q_length, dim = query.size()
+
+ dim_per_head = self.dim // self.n_heads
+
+ def reshape(x: torch.Tensor) -> torch.Tensor:
+ """separate heads"""
+ return x.view(batch_size, -1, self.n_heads, dim_per_head)
+
+ # Flash attention requires the input to have the shape
+ # batch_size x seq_length x n_heads x head_dim
+ query_states = reshape(self.q_lin(query))
+ key_states = reshape(self.k_lin(key))
+ value_states = reshape(self.v_lin(value))
+
+ attn_dropout = self.config.attention_dropout if self.training else 0.0
+
+ # In PEFT, the layer norms are usually cast to float32 for training stability,
+ # so the input hidden states get silently cast to float32 as well. We therefore need to
+ # cast them back to the correct dtype just to be sure everything works as expected.
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
+ # to fp32. (LlamaRMSNorm handles it correctly)
+
+ if query_states.dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_lin.weight.dtype
+
+ logger.warning_once(
+ "The input hidden states seem to have been silently cast to float32; this might be related to"
+ " the fact that you have upcast embedding or layer norm layers to float32. We will cast the input back to"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_weights = self._flash_attention_forward(
+ query_states, key_states, value_states, mask, q_length, dropout=attn_dropout
+ )
+
+ attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
+ attn_output = self.out_lin(attn_weights_reshaped)
+
+ if output_attentions:
+ return (attn_output, attn_weights)
+ else:
+ return (attn_output,)
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward with causal=True->causal=False
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+ it first unpads the input, then computes the attention scores and pads the final attention scores.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->n_heads
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.n_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class FFN(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.dropout = nn.Dropout(p=config.dropout)
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
+ self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
+ self.activation = get_activation(config.activation)
+
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
+ return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
+
+ def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
+ x = self.lin1(input)
+ x = self.activation(x)
+ x = self.lin2(x)
+ x = self.dropout(x)
+ return x
+
+
+DISTILBERT_ATTENTION_CLASSES = {
+ "eager": MultiHeadSelfAttention,
+ "flash_attention_2": DistilBertFlashAttention2,
+}
+
+
+class TransformerBlock(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+
+ # config.dim must be evenly divisible by config.n_heads
+ if config.dim % config.n_heads != 0:
+ raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")
+
+ self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
+ self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
+
+ self.ffn = FFN(config)
+ self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ attn_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, ...]:
+ """
+ Parameters:
+ x: torch.tensor(bs, seq_length, dim)
+ attn_mask: torch.tensor(bs, seq_length)
+
+ Returns:
+ sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights
+ ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
+ """
+ # Self-Attention
+ sa_output = self.attention(
+ query=x,
+ key=x,
+ value=x,
+ mask=attn_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ if output_attentions:
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
+ else: # The attention module returns a tuple even when attention weights are not requested
+ if type(sa_output) != tuple:
+ raise TypeError(f"sa_output must be a tuple but it is {type(sa_output)} type")
+
+ sa_output = sa_output[0]
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
+
+ # Feed Forward Network
+ ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
+ ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
+
+ output = (ffn_output,)
+ if output_attentions:
+ output = (sa_weights,) + output
+ return output
+
+
+class Transformer(nn.Module):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__()
+ self.n_layers = config.n_layers
+ self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ attn_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: # docstyle-ignore
+ """
+ Parameters:
+ x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
+ attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
+
+ Returns:
+ hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top) layer
+ all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
+ Tuple of length n_layers with the hidden states from each layer.
+ Optional: only if output_hidden_states=True
+ all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
+ Tuple of length n_layers with the attention weights from each layer
+ Optional: only if output_attentions=True
+ """
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_state = x
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_state,
+ attn_mask,
+ head_mask[i],
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_state,
+ attn_mask,
+ head_mask[i],
+ output_attentions,
+ )
+
+ hidden_state = layer_outputs[-1]
+
+ if output_attentions:
+ if len(layer_outputs) != 2:
+ raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(layer_outputs)}")
+
+ attentions = layer_outputs[0]
+ all_attentions = all_attentions + (attentions,)
+ else:
+ if len(layer_outputs) != 1:
+ raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(layer_outputs)}")
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
+class DistilBertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DistilBertConfig
+ load_tf_weights = None
+ base_model_prefix = "distilbert"
+ supports_gradient_checkpointing = True
+ _supports_flash_attn_2 = True
+
+ def _init_weights(self, module: nn.Module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertModel(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.embeddings = Embeddings(config) # Embeddings
+ self.transformer = Transformer(config) # Encoder
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.embeddings.position_embeddings
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The new size of the position embedding matrix. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings
+
+ # no resizing needs to be done if the length stays the same
+ if num_position_embeds_diff == 0:
+ return
+
+ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
+ self.config.max_position_embeddings = new_num_position_embeddings
+
+ old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()
+
+ self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)
+
+ if self.config.sinusoidal_pos_embds:
+ create_sinusoidal_embeddings(
+ n_pos=self.config.max_position_embeddings, dim=self.config.dim, out=self.embeddings.position_embeddings.weight
+ )
+ else:
+ with torch.no_grad():
+ if num_position_embeds_diff > 0:
+ self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
+ old_position_embeddings_weight
+ )
+ else:
+ self.embeddings.position_embeddings.weight = nn.Parameter(
+ old_position_embeddings_weight[:num_position_embeds_diff]
+ )
+ # move position_embeddings to correct device
+ self.embeddings.position_embeddings.to(self.device)
+
+ def get_input_embeddings(self) -> nn.Embedding:
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, new_embeddings: nn.Embedding):
+ self.embeddings.word_embeddings = new_embeddings
+
+ def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.transformer.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # Prepare head mask if needed
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embeddings = self.embeddings(input_ids, inputs_embeds) # (bs, seq_length, dim)
+
+ if self._use_flash_attention_2:
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ else:
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)
+
+ return self.transformer(
+ x=embeddings,
+ attn_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
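A minimal end-to-end sketch (not part of the file) of the bare encoder above, using a randomly initialized, deliberately tiny configuration so no checkpoint download is needed; assumes `torch` and `transformers` are installed.

import torch
from transformers import DistilBertConfig, DistilBertModel

config = DistilBertConfig(vocab_size=100, n_layers=2, n_heads=4, dim=64, hidden_dim=256)
model = DistilBertModel(config).eval()

input_ids = torch.randint(0, 100, (1, 8))
attention_mask = torch.ones(1, 8)
with torch.no_grad():
    outputs = model(input_ids=input_ids, attention_mask=attention_mask)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 8, 64])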
+
+
+@add_start_docstrings(
+ """DistilBert Model with a `masked language modeling` head on top.""",
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForMaskedLM(DistilBertPreTrainedModel):
+ _tied_weights_keys = ["vocab_projector.weight"]
+
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.activation = get_activation(config.activation)
+
+ self.distilbert = DistilBertModel(config)
+ self.vocab_transform = nn.Linear(config.dim, config.dim)
+ self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
+ self.vocab_projector = nn.Linear(config.dim, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.mlm_loss_fct = nn.CrossEntropyLoss()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The new size of the position embedding matrix. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ def get_output_embeddings(self) -> nn.Module:
+ return self.vocab_projector
+
+ def set_output_embeddings(self, new_embeddings: nn.Module):
+ self.vocab_projector = new_embeddings
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ dlbrt_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = dlbrt_output[0] # (bs, seq_length, dim)
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
+ prediction_logits = self.activation(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size)
+
+ mlm_loss = None
+ if labels is not None:
+ mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_logits,) + dlbrt_output[1:]
+ return ((mlm_loss,) + output) if mlm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=mlm_loss,
+ logits=prediction_logits,
+ hidden_states=dlbrt_output.hidden_states,
+ attentions=dlbrt_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.distilbert = DistilBertModel(config)
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
+ self.classifier = nn.Linear(config.dim, config.num_labels)
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The new size of the position embedding matrix. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
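+
+ Example (a minimal, illustrative sketch; it assumes the fine-tuned
+ `distilbert-base-uncased-finetuned-sst-2-english` checkpoint is available):
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, DistilBertForSequenceClassification
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
+ >>> model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> with torch.no_grad():
+ ...     logits = model(**inputs).logits
+
+ >>> predicted_class_id = logits.argmax(dim=-1).item()
+ >>> model.config.id2label[predicted_class_id]
+ ```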
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
+ pooled_output = self.dropout(pooled_output) # (bs, dim)
+ logits = self.classifier(pooled_output) # (bs, num_labels)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.distilbert = DistilBertModel(config)
+ self.qa_outputs = nn.Linear(config.dim, config.num_labels)
+ if config.num_labels != 2:
+ raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")
+
+ self.dropout = nn.Dropout(config.qa_dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings.
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
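+
+ Example (a minimal, illustrative sketch; it assumes the `distilbert-base-cased-distilled-squad` checkpoint is
+ available):
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, DistilBertForQuestionAnswering
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased-distilled-squad")
+ >>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert-base-cased-distilled-squad")
+
+ >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
+ >>> inputs = tokenizer(question, text, return_tensors="pt")
+ >>> with torch.no_grad():
+ ...     outputs = model(**inputs)
+
+ >>> # the most likely span is delimited by the argmax of the start and end logits
+ >>> answer_start = outputs.start_logits.argmax()
+ >>> answer_end = outputs.end_logits.argmax()
+ >>> answer_tokens = inputs.input_ids[0, answer_start : answer_end + 1]
+ >>> tokenizer.decode(answer_tokens)
+ ```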
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
+
+ hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim)
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous() # (bs, max_query_len)
+ end_logits = end_logits.squeeze(-1).contiguous() # (bs, max_query_len)
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, splitting adds a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + distilbert_output[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForTokenClassification(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.distilbert = DistilBertModel(config)
+ self.dropout = nn.Dropout(config.dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings.
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
+ the size will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
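+
+ Example (a minimal, illustrative sketch; with the plain `distilbert-base-uncased` checkpoint the
+ token-classification head is newly initialized, so the predicted labels are meaningless until the model is
+ fine-tuned):
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoTokenizer, DistilBertForTokenClassification
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+ >>> model = DistilBertForTokenClassification.from_pretrained("distilbert-base-uncased")
+
+ >>> inputs = tokenizer("HuggingFace is based in NYC", return_tensors="pt")
+ >>> with torch.no_grad():
+ ...     logits = model(**inputs).logits
+
+ >>> predicted_label_ids = logits.argmax(dim=-1)[0].tolist()
+ >>> [model.config.id2label[i] for i in predicted_label_ids]
+ ```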
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
+ def __init__(self, config: PretrainedConfig):
+ super().__init__(config)
+
+ self.distilbert = DistilBertModel(config)
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
+ self.classifier = nn.Linear(config.dim, 1)
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_position_embeddings(self) -> nn.Embedding:
+ """
+ Returns the position embeddings.
+ """
+ return self.distilbert.get_position_embeddings()
+
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
+ """
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
+
+ Arguments:
+ new_num_position_embeddings (`int`):
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
+ will remove vectors from the end.
+ """
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
+
+ @add_start_docstrings_to_model_forward(
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
+ >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> choice0 = "It is eaten with a fork and a knife."
+ >>> choice1 = "It is eaten while held in the hand."
+ >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
+
+ >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
+ >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
+
+ >>> # the linear classifier still needs to be trained
+ >>> loss = outputs.loss
+ >>> logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
+ pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
+ pooled_output = self.dropout(pooled_output) # (bs * num_choices, dim)
+ logits = self.classifier(pooled_output) # (bs * num_choices, 1)
+
+ reshaped_logits = logits.view(-1, num_choices) # (bs, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3c48c077adc529a1e942fcbce1999c2d0f8d524
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py
@@ -0,0 +1,895 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from typing import Callable, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxMaskedLMOutput,
+ FlaxMultipleChoiceModelOutput,
+ FlaxQuestionAnsweringModelOutput,
+ FlaxSequenceClassifierOutput,
+ FlaxTokenClassifierOutput,
+)
+from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_distilbert import DistilBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+
+FLAX_DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models)
+
+ This model is also a
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
+ behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`numpy.ndarray` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def get_angles(pos, i, d_model):
+ angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
+ return pos * angle_rates
+
+
+def positional_encoding(position, d_model):
+ # create the sinusoidal pattern for the positional encoding
+ angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
+
+ # apply sin to even indices in the array; 2i
+ angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
+
+ # apply cos to odd indices in the array; 2i+1
+ angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
+
+ pos_encoding = angle_rads[np.newaxis, ...]
+
+ return jnp.array(pos_encoding)
+
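+# Illustrative note (not executed here): positional_encoding(max_position_embeddings, dim) returns a
+# jnp array of shape (1, max_position_embeddings, dim) whose even feature indices hold the sine
+# components and odd indices the cosine components; FlaxEmbeddings slices it along the sequence axis.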
+
+class FlaxEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.word_embeddings = nn.Embed(
+ self.config.vocab_size,
+ self.config.dim,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ if not self.config.sinusoidal_pos_embds:
+ self.position_embeddings = nn.Embed(
+ self.config.max_position_embeddings,
+ self.config.dim,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ else:
+ self.pos_encoding = positional_encoding(self.config.max_position_embeddings, self.config.dim)
+ self.LayerNorm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.dropout)
+
+ def __call__(self, input_ids, deterministic: bool = True):
+ # Embed
+ batch_size, seq_length = input_ids.shape
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
+ if not self.config.sinusoidal_pos_embds:
+ position_ids = jnp.arange(seq_length).astype("i4")
+ position_ids = jnp.broadcast_to(position_ids, shape=(batch_size, seq_length))
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
+ else:
+ position_embeds = self.pos_encoding[:, :seq_length, :]
+ # explicitly cast the positions here, since self.pos_encoding is not registered as a parameter
+ position_embeds = position_embeds.astype(inputs_embeds.dtype)
+
+ # Sum all embeddings
+ hidden_states = inputs_embeds + position_embeds
+
+ # Layer Norm
+ hidden_states = self.LayerNorm(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxMultiHeadSelfAttention(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.n_heads = self.config.n_heads
+ self.dim = self.config.dim
+ self.dropout = nn.Dropout(rate=self.config.attention_dropout)
+
+ if not (self.dim % self.n_heads == 0):
+ raise ValueError(f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}")
+
+ self.q_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.k_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.v_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.out_lin = nn.Dense(
+ self.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ def __call__(
+ self,
+ query,
+ key,
+ value,
+ mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ ):
+ bs, q_len, dim = query.shape
+ k_len = key.shape[1]
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ # assert key.size() == value.size()
+
+ dim_per_head = self.dim // self.n_heads
+
+ mask_reshp = (bs, 1, 1, k_len)
+
+ def shape(x):
+ """separate heads"""
+ return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3)
+
+ def unshape(x):
+ """group heads"""
+ return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head)
+
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_len, dim_per_head)
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_len, dim_per_head)
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_len, dim_per_head)
+
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_len, dim_per_head)
+ scores = jnp.matmul(q, k.transpose(0, 1, 3, 2)) # (bs, n_heads, q_len, k_len)
+ mask = jnp.reshape(mask, mask_reshp)
+
+ mask = mask.astype(scores.dtype)
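+ # additive masking: positions with mask == 0 get a large negative score so softmax assigns them ~0 weight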
+ scores = scores - 1e30 * (1.0 - mask)
+
+ weights = nn.softmax(scores, axis=-1) # (bs, n_heads, q_len, k_len)
+ weights = self.dropout(weights, deterministic=deterministic)
+
+ context = jnp.matmul(weights, v) # (bs, n_heads, q_len, dim_per_head)
+ context = unshape(context) # (bs, q_len, dim)
+ context = self.out_lin(context) # (bs, q_len, dim)
+
+ if output_attentions:
+ return (context, weights)
+ else:
+ return (context,)
+
+
+class FlaxFFN(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dropout = nn.Dropout(rate=self.config.dropout)
+ self.chunk_size_feed_forward = self.config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.lin1 = nn.Dense(
+ self.config.hidden_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.lin2 = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ self.activation = ACT2FN[self.config.activation]
+
+ def __call__(self, hidden_states, deterministic: bool = True):
+ hidden_states = self.lin1(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.lin2(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxTransformerBlock(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ assert (
+ self.config.dim % self.config.n_heads == 0
+ ), f"Hidden size {self.config.dim} not dividable by number of heads {self.config.n_heads}"
+
+ self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype)
+ self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+
+ self.ffn = FlaxFFN(self.config, dtype=self.dtype)
+ self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attn_mask,
+ output_attentions: bool = False,
+ deterministic: bool = True,
+ ):
+ # Self-Attention
+ sa_output = self.attention(
+ query=hidden_states,
+ key=hidden_states,
+ value=hidden_states,
+ mask=attn_mask,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ if output_attentions:
+ sa_output, sa_weights = sa_output
+ else:
+ assert type(sa_output) == tuple
+ sa_output = sa_output[0]
+ sa_output = self.sa_layer_norm(sa_output + hidden_states)
+
+ # Feed Forward Network
+ ffn_output = self.ffn(sa_output, deterministic=deterministic)
+ ffn_output = self.output_layer_norm(ffn_output + sa_output)
+ output = (ffn_output,)
+ if output_attentions:
+ output = (sa_weights,) + output
+ return output
+
+
+class FlaxTransformer(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ return_dict: bool = False,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for layer_module in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ attn_mask=attention_mask,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+ hidden_states = layer_outputs[-1]
+
+ if output_attentions:
+ assert len(layer_outputs) == 2
+ attentions = layer_outputs[0]
+ all_attentions = all_attentions + (attentions,)
+ else:
+ assert len(layer_outputs) == 1
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None)
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class FlaxTransformerEncoder(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layer = FlaxTransformer(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ deterministic: bool = True,
+ return_dict: bool = False,
+ ):
+ return self.layer(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ return_dict=return_dict,
+ )
+
+
+class FlaxDistilBertLMDecoder(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
+
+ def __call__(self, inputs, kernel):
+ inputs = jnp.asarray(inputs, self.dtype)
+ kernel = jnp.asarray(kernel, self.dtype)
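+ # contract the last axis of `inputs` (dim) with the first axis of `kernel`, i.e. inputs @ kernel;
+ # with tied word embeddings, `kernel` is the transposed embedding matrix of shape (dim, vocab_size)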
+ y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
+ bias = jnp.asarray(self.bias, self.dtype)
+ y = y + bias
+ return y
+
+
+class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DistilBertConfig
+ base_model_prefix = "distilbert"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: DistilBertConfig,
+ input_shape: Tuple = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ attention_mask = jnp.ones_like(input_ids)
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ head_mask=None,
+ params: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ return self.module.apply(
+ {"params": params or self.params},
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ not train,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ rngs=rngs,
+ )
+
+
+class FlaxDistilBertModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype)
+ self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ input_embeds = self.embeddings(input_ids, deterministic=deterministic)
+ return self.transformer(
+ hidden_states=input_embeds,
+ attention_mask=attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+@add_start_docstrings(
+ "The bare DistilBert Model transformer outputting raw hidden-states without any specific head on top.",
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertModule
+
+
+append_call_sample_docstring(FlaxDistilBertModel, _CHECKPOINT_FOR_DOC, None, _CONFIG_FOR_DOC)
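+
+# Illustrative usage (a minimal sketch kept as a comment; it assumes a working JAX/Flax install and that the
+# `distilbert-base-uncased` checkpoint is available):
+#
+#     from transformers import AutoTokenizer, FlaxDistilBertModel
+#
+#     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+#     model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
+#     inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
+#     last_hidden_state = model(**inputs).last_hidden_state  # shape (1, seq_len, config.dim)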
+
+
+class FlaxDistilBertForMaskedLMModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype)
+ self.vocab_transform = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
+ if self.config.tie_word_embeddings:
+ self.vocab_projector = FlaxDistilBertLMDecoder(
+ self.config,
+ dtype=self.dtype,
+ )
+ else:
+ self.vocab_projector = nn.Dense(
+ self.config.vocab_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ dlbrt_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ deterministic=deterministic,
+ return_dict=return_dict,
+ )
+ hidden_states = dlbrt_output[0]
+ prediction_logits = self.vocab_transform(hidden_states)
+ prediction_logits = ACT2FN[self.config.activation](prediction_logits)
+ prediction_logits = self.vocab_layer_norm(prediction_logits)
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.distilbert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
+ prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T)
+ else:
+ prediction_logits = self.vocab_projector(prediction_logits)
+
+ if not return_dict:
+ output = (prediction_logits,) + dlbrt_output[1:]
+ return output
+
+ return FlaxMaskedLMOutput(
+ logits=prediction_logits,
+ hidden_states=dlbrt_output.hidden_states,
+ attentions=dlbrt_output.attentions,
+ )
+
+
+@add_start_docstrings("""DistilBert Model with a `language modeling` head on top.""", FLAX_DISTILBERT_START_DOCSTRING)
+class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForMaskedLMModule
+
+
+append_call_sample_docstring(FlaxDistilBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
+
+
+class FlaxDistilBertForSequenceClassificationModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.pre_classifier = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
+ self.classifier = nn.Dense(
+ self.config.num_labels,
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ # Model
+ distilbert_output = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = ACT2FN["relu"](pooled_output)
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output) # (bs, dim)
+
+ if not return_dict:
+ return (logits,) + distilbert_output[1:]
+
+ return FlaxSequenceClassifierOutput(
+ logits=logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForSequenceClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxDistilBertForSequenceClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxSequenceClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxDistilBertForMultipleChoiceModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.pre_classifier = nn.Dense(
+ self.config.dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
+ self.classifier = nn.Dense(
+ 1,
+ dtype=self.dtype,
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1]
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
+
+ # Model
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_state = outputs[0]
+ pooled_output = hidden_state[:, 0]
+ pooled_output = self.pre_classifier(pooled_output)
+ pooled_output = ACT2FN["relu"](pooled_output)
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
+ logits = self.classifier(pooled_output)
+
+ reshaped_logits = logits.reshape(-1, num_choices)
+
+ if not return_dict:
+ return (reshaped_logits,) + outputs[2:]
+
+ return FlaxMultipleChoiceModelOutput(
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForMultipleChoiceModule
+
+
+overwrite_call_docstring(
+ FlaxDistilBertForMultipleChoice, DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+)
+append_call_sample_docstring(
+ FlaxDistilBertForMultipleChoice,
+ _CHECKPOINT_FOR_DOC,
+ FlaxMultipleChoiceModelOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxDistilBertForTokenClassificationModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.dropout = nn.Dropout(rate=self.config.dropout)
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ # Model
+ outputs = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ logits = self.classifier(hidden_states)
+
+ if not return_dict:
+ return (logits,) + outputs[1:]
+
+ return FlaxTokenClassifierOutput(
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForTokenClassificationModule
+
+
+append_call_sample_docstring(
+ FlaxDistilBertForTokenClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxTokenClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxDistilBertForQuestionAnsweringModule(nn.Module):
+ config: DistilBertConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
+ assert self.config.num_labels == 2
+ self.dropout = nn.Dropout(rate=self.config.qa_dropout)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Model
+ distilbert_output = self.distilbert(
+ input_ids,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = distilbert_output[0]
+
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ logits = self.qa_outputs(hidden_states)
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = end_logits.squeeze(-1)
+
+ if not return_dict:
+ return (start_logits, end_logits) + distilbert_output[1:]
+
+ return FlaxQuestionAnsweringModelOutput(
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ FLAX_DISTILBERT_START_DOCSTRING,
+)
+class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel):
+ module_class = FlaxDistilBertForQuestionAnsweringModule
+
+
+append_call_sample_docstring(
+ FlaxDistilBertForQuestionAnswering,
+ _CHECKPOINT_FOR_DOC,
+ FlaxQuestionAnsweringModelOutput,
+ _CONFIG_FOR_DOC,
+)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..39fd470597fa870b924b75e8d1fe8c61cff1d66e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py
@@ -0,0 +1,1146 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ TF 2.0 DistilBERT model
+"""
+
+
+from __future__ import annotations
+
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_distilbert import DistilBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
+_CONFIG_FOR_DOC = "DistilBertConfig"
+
+TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "distilbert-base-uncased",
+ "distilbert-base-uncased-distilled-squad",
+ "distilbert-base-cased",
+ "distilbert-base-cased-distilled-squad",
+ "distilbert-base-multilingual-cased",
+ "distilbert-base-uncased-finetuned-sst-2-english",
+ # See all DistilBERT models at https://huggingface.co/models?filter=distilbert
+]
+
+
+class TFEmbeddings(keras.layers.Layer):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.dim = config.dim
+ self.initializer_range = config.initializer_range
+ self.max_position_embeddings = config.max_position_embeddings
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.dropout)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.dim],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.dim],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.dim])
+
+ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
+ """
+ Applies word and position embeddings to the input tensors.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ final_embeddings = inputs_embeds + position_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+class TFMultiHeadSelfAttention(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.n_heads = config.n_heads
+ self.dim = config.dim
+ self.dropout = keras.layers.Dropout(config.attention_dropout)
+ self.output_attentions = config.output_attentions
+
+ assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not divisible by number of heads {self.n_heads}"
+
+ self.q_lin = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin"
+ )
+ self.k_lin = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin"
+ )
+ self.v_lin = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin"
+ )
+ self.out_lin = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin"
+ )
+
+ self.pruned_heads = set()
+ self.config = config
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(self, query, key, value, mask, head_mask, output_attentions, training=False):
+ """
+ Parameters:
+ query: tf.Tensor(bs, seq_length, dim)
+ key: tf.Tensor(bs, seq_length, dim)
+ value: tf.Tensor(bs, seq_length, dim)
+ mask: tf.Tensor(bs, seq_length)
+
+ Returns:
+ context: tf.Tensor(bs, seq_length, dim) Contextualized layer.
+ weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights. Optional: only if
+ `output_attentions=True`
+ """
+ bs, q_length, dim = shape_list(query)
+ k_length = shape_list(key)[1]
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
+ # assert key.size() == value.size()
+ dim_per_head = int(self.dim / self.n_heads)
+ dim_per_head = tf.cast(dim_per_head, dtype=tf.int32)
+ mask_reshape = [bs, 1, 1, k_length]
+
+ def shape(x):
+ """separate heads"""
+ return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
+
+ def unshape(x):
+ """group heads"""
+ return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
+
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
+ q = tf.cast(q, dtype=tf.float32)
+ q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32)))
+ k = tf.cast(k, dtype=q.dtype)
+ scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, q_length, k_length)
+ mask = tf.reshape(mask, mask_reshape) # (bs, 1, 1, k_length)
+ # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length)
+
+ mask = tf.cast(mask, dtype=scores.dtype)
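+ # additive masking: padded positions (mask == 0) get a large negative score so stable_softmax gives them ~0 weight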
+ scores = scores - 1e30 * (1.0 - mask)
+ weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
+ weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ weights = weights * head_mask
+
+ context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
+ context = unshape(context) # (bs, q_length, dim)
+ context = self.out_lin(context) # (bs, q_length, dim)
+
+ if output_attentions:
+ return (context, weights)
+ else:
+ return (context,)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_lin", None) is not None:
+ with tf.name_scope(self.q_lin.name):
+ self.q_lin.build([None, None, self.config.dim])
+ if getattr(self, "k_lin", None) is not None:
+ with tf.name_scope(self.k_lin.name):
+ self.k_lin.build([None, None, self.config.dim])
+ if getattr(self, "v_lin", None) is not None:
+ with tf.name_scope(self.v_lin.name):
+ self.v_lin.build([None, None, self.config.dim])
+ if getattr(self, "out_lin", None) is not None:
+ with tf.name_scope(self.out_lin.name):
+ self.out_lin.build([None, None, self.config.dim])
+
+
+class TFFFN(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.lin1 = keras.layers.Dense(
+ config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1"
+ )
+ self.lin2 = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2"
+ )
+ self.activation = get_tf_activation(config.activation)
+ self.config = config
+
+ def call(self, input, training=False):
+ x = self.lin1(input)
+ x = self.activation(x)
+ x = self.lin2(x)
+ x = self.dropout(x, training=training)
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "lin1", None) is not None:
+ with tf.name_scope(self.lin1.name):
+ self.lin1.build([None, None, self.config.dim])
+ if getattr(self, "lin2", None) is not None:
+ with tf.name_scope(self.lin2.name):
+ self.lin2.build([None, None, self.config.hidden_dim])
+
+
+class TFTransformerBlock(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.n_heads = config.n_heads
+ self.dim = config.dim
+ self.hidden_dim = config.hidden_dim
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation = config.activation
+ self.output_attentions = config.output_attentions
+
+ assert (
+ config.dim % config.n_heads == 0
+ ), f"Hidden size {config.dim} not dividable by number of heads {config.n_heads}"
+
+ self.attention = TFMultiHeadSelfAttention(config, name="attention")
+ self.sa_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")
+
+ self.ffn = TFFFN(config, name="ffn")
+ self.output_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
+ self.config = config
+
+ def call(self, x, attn_mask, head_mask, output_attentions, training=False): # removed: src_enc=None, src_len=None
+ """
+ Parameters:
+ x: tf.Tensor(bs, seq_length, dim)
+ attn_mask: tf.Tensor(bs, seq_length)
+
+ Outputs:
+ sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length) The attention weights.
+ ffn_output: tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization.
+ """
+ # Self-Attention
+ sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training)
+ if output_attentions:
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
+ else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
+ # assert type(sa_output) == tuple
+ sa_output = sa_output[0]
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
+
+ # Feed Forward Network
+ ffn_output = self.ffn(sa_output, training=training) # (bs, seq_length, dim)
+ ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
+
+ output = (ffn_output,)
+ if output_attentions:
+ output = (sa_weights,) + output
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "sa_layer_norm", None) is not None:
+ with tf.name_scope(self.sa_layer_norm.name):
+ self.sa_layer_norm.build([None, None, self.config.dim])
+ if getattr(self, "ffn", None) is not None:
+ with tf.name_scope(self.ffn.name):
+ self.ffn.build(None)
+ if getattr(self, "output_layer_norm", None) is not None:
+ with tf.name_scope(self.output_layer_norm.name):
+ self.output_layer_norm.build([None, None, self.config.dim])
+
+
+class TFTransformer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.n_layers = config.n_layers
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+
+ self.layer = [TFTransformerBlock(config, name=f"layer_._{i}") for i in range(config.n_layers)]
+
+ def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False):
+ # docstyle-ignore
+ """
+ Parameters:
+ x: tf.Tensor(bs, seq_length, dim) Input sequence embedded.
+ attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence.
+
+ Returns:
+ hidden_state: tf.Tensor(bs, seq_length, dim)
+ Sequence of hidden states in the last (top) layer
+ all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
+ Tuple of length n_layers with the hidden states from each layer.
+ Optional: only if output_hidden_states=True
+ all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
+ Tuple of length n_layers with the attention weights from each layer
+ Optional: only if output_attentions=True
+ """
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_state = x
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training)
+ hidden_state = layer_outputs[-1]
+
+ if output_attentions:
+ assert len(layer_outputs) == 2
+ attentions = layer_outputs[0]
+ all_attentions = all_attentions + (attentions,)
+ else:
+ assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1"
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_state,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFDistilBertMainLayer(keras.layers.Layer):
+ config_class = DistilBertConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.num_hidden_layers = config.num_hidden_layers
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+
+ self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings
+ self.transformer = TFTransformer(config, name="transformer") # Encoder
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = value.shape[0]
+
+ def _prune_heads(self, heads_to_prune):
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ head_mask=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.ones(input_shape) # (bs, seq_length)
+
+ attention_mask = tf.cast(attention_mask, dtype=tf.float32)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.num_hidden_layers
+
+ embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) # (bs, seq_length, dim)
+ tfmr_output = self.transformer(
+ embedding_output,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=training,
+ )
+
+ return tfmr_output # last-layer hidden-state, (all hidden_states), (all attentions)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+
+
+# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
+class TFDistilBertPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DistilBertConfig
+ base_model_prefix = "distilbert"
+
+
+DISTILBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])`
+ - a dictionary with one or several input Tensors associated with the input names given in the docstring:
+ `model({"input_ids": input_ids, "attention_mask": attention_mask})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
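+
+ As a minimal illustrative sketch (the `distilbert-base-uncased` checkpoint is only an example; any DistilBERT
+ checkpoint works the same way), the three options above could look like:
+
+ ```python
+ from transformers import AutoTokenizer, TFDistilBertModel
+
+ tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+ model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
+ enc = tokenizer("Hello, world!", return_tensors="tf")
+
+ out = model(enc["input_ids"])  # a single tensor with input_ids only
+ out = model([enc["input_ids"], enc["attention_mask"]])  # a list, in the order given in the docstring
+ out = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})  # a dict keyed by input name
+ ```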
+
+ Parameters:
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DISTILBERT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertModel(TFDistilBertPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert") # Embeddings
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ outputs = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+
+
+class TFDistilBertLMHead(keras.layers.Layer):
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.dim = config.dim
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.input_embeddings = input_embeddings
+
+ def build(self, input_shape):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ super().build(input_shape)
+
+ def get_output_embeddings(self):
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
+ seq_length = shape_list(tensor=hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.dim])  # (bs * seq_length, dim)
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)  # decode with the tied input embedding matrix -> (bs * seq_length, vocab_size)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])  # (bs, seq_length, vocab_size)
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)  # add the output-only bias
+
+ return hidden_states
+
+
+@add_start_docstrings(
+ """DistilBert Model with a `masked language modeling` head on top.""",
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.config = config
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.vocab_transform = keras.layers.Dense(
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform"
+ )
+ self.act = get_tf_activation(config.activation)
+ self.vocab_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
+ self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")
+
+ def get_lm_head(self):
+ return self.vocab_projector
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.vocab_projector.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = distilbert_output[0] # (bs, seq_length, dim)
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
+ prediction_logits = self.act(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
+ prediction_logits = self.vocab_projector(prediction_logits)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits)
+
+ if not return_dict:
+ output = (prediction_logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "vocab_transform", None) is not None:
+ with tf.name_scope(self.vocab_transform.name):
+ self.vocab_transform.build([None, None, self.config.dim])
+ if getattr(self, "vocab_layer_norm", None) is not None:
+ with tf.name_scope(self.vocab_layer_norm.name):
+ self.vocab_layer_norm.build([None, None, self.config.dim])
+ if getattr(self, "vocab_projector", None) is not None:
+ with tf.name_scope(self.vocab_projector.name):
+ self.vocab_projector.build(None)
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.pre_classifier = keras.layers.Dense(
+ config.dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="relu",
+ name="pre_classifier",
+ )
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
+ logits = self.classifier(pooled_output)  # (bs, num_labels)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "pre_classifier", None) is not None:
+ with tf.name_scope(self.pre_classifier.name):
+ self.pre_classifier.build([None, None, self.config.dim])
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.dim])
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
+ for Named-Entity-Recognition (NER) tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ outputs = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(sequence_output)
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
+ a softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
+ self.pre_classifier = keras.layers.Dense(
+ config.dim,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="relu",
+ name="pre_classifier",
+ )
+ self.classifier = keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
+ )
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
+ """
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+ distilbert_output = self.distilbert(
+ flat_input_ids,
+ flat_attention_mask,
+ head_mask,
+ flat_inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
+ pooled_output = hidden_state[:, 0] # (bs, dim)
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "pre_classifier", None) is not None:
+ with tf.name_scope(self.pre_classifier.name):
+ self.pre_classifier.build([None, None, self.config.dim])
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.dim])
+
+
+@add_start_docstrings(
+ """
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DISTILBERT_START_DOCSTRING,
+)
+class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
+ self.qa_outputs = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2"
+ self.dropout = keras.layers.Dropout(config.qa_dropout)
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ distilbert_output = self.distilbert(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
+ hidden_states = self.dropout(hidden_states, training=training) # (bs, max_query_len, dim)
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + distilbert_output[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=distilbert_output.hidden_states,
+ attentions=distilbert_output.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "distilbert", None) is not None:
+ with tf.name_scope(self.distilbert.name):
+ self.distilbert.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.dim])
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..014c41d1243b6f7d29a02b7bc7aa2d7a1c6dbd60
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py
@@ -0,0 +1,553 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for DistilBERT."""
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
+ "distilbert-base-uncased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
+ ),
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
+ "distilbert-base-cased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
+ ),
+ "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
+ "distilbert-base-multilingual-cased": (
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
+ ),
+ }
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "distilbert-base-uncased": 512,
+ "distilbert-base-uncased-distilled-squad": 512,
+ "distilbert-base-cased": 512,
+ "distilbert-base-cased-distilled-squad": 512,
+ "distilbert-base-german-cased": 512,
+ "distilbert-base-multilingual-cased": 512,
+}
+
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "distilbert-base-uncased": {"do_lower_case": True},
+ "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
+ "distilbert-base-cased": {"do_lower_case": False},
+ "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
+ "distilbert-base-german-cased": {"do_lower_case": False},
+ "distilbert-base-multilingual-cased": {"do_lower_case": False},
+}
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+class DistilBertTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a DistilBERT tokenizer. Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
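+
+ Example (a minimal sketch; it assumes one of the checkpoints listed above, e.g. `distilbert-base-uncased`, can be
+ downloaded):
+
+ ```python
+ from transformers import DistilBertTokenizer
+
+ tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
+ encoded = tokenizer("Hello, how are you?")
+ # DistilBERT does not use token type ids, so only `input_ids` and `attention_mask` are returned;
+ # the ids start with [CLS] and end with [SEP] (see `build_inputs_with_special_tokens` below).
+ tokens = tokenizer.convert_ids_to_tokens(encoded["input_ids"])
+ ```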
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = DistilBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
+ def vocab_size(self):
+ return len(self.vocab)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
+ def _tokenize(self, text, split_special_tokens=False):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
+ ):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) into an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) into a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings) into a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
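+
+ Example (illustrative only; `cls` and `sep` stand for `self.cls_token_id` and `self.sep_token_id`):
+
+ ```python
+ tokenizer.build_inputs_with_special_tokens([5, 6])  # [cls, 5, 6, sep]
+ tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])  # [cls, 5, 6, sep, 7, 8, sep]
+ ```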
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
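+
+ Example (illustrative): for `token_ids_0=[5, 6]` and `token_ids_1=[7]` with `already_has_special_tokens=False`,
+ the returned mask is `[1, 0, 0, 1, 0, 1]`.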
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
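+
+ Example (a minimal sketch of the default behaviour described above):
+
+ ```python
+ tokenizer = BasicTokenizer(do_lower_case=True)
+ tokenizer.tokenize("Hello, World!")  # -> ["hello", ",", "world", "!"]
+ ```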
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordpieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*):
+ List of tokens not to split. Kept for backward compatibility purposes; now implemented directly at the
+ base class level (see [`PreTrainedTokenizer.tokenize`]).
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because the English Wikipedia does contain
+ # some Chinese words).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..adb90f857d75fef338b71975f27a4d9fcb521b4e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py
@@ -0,0 +1,231 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for DistilBERT."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_distilbert import DistilBertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
+ "distilbert-base-uncased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
+ ),
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
+ "distilbert-base-cased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
+ ),
+ "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
+ "distilbert-base-multilingual-cased": (
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
+ ),
+ },
+ "tokenizer_file": {
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
+ "distilbert-base-uncased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
+ ),
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
+ "distilbert-base-cased-distilled-squad": (
+ "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
+ ),
+ "distilbert-base-german-cased": (
+ "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
+ ),
+ "distilbert-base-multilingual-cased": (
+ "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
+ ),
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "distilbert-base-uncased": 512,
+ "distilbert-base-uncased-distilled-squad": 512,
+ "distilbert-base-cased": 512,
+ "distilbert-base-cased-distilled-squad": 512,
+ "distilbert-base-german-cased": 512,
+ "distilbert-base-multilingual-cased": 512,
+}
+
+
+PRETRAINED_INIT_CONFIGURATION = {
+ "distilbert-base-uncased": {"do_lower_case": True},
+ "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
+ "distilbert-base-cased": {"do_lower_case": False},
+ "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
+ "distilbert-base-german-cased": {"do_lower_case": False},
+ "distilbert-base-multilingual-cased": {"do_lower_case": False},
+}
+
+
+class DistilBertTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
+ whitespace characters with a standard space.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = DistilBertTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
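+
+# Editor's note: illustrative sketch, not part of the vendored file above. The `__init__` just above keeps the
+# backend *tokenizers* normalizer in sync with the Python-side arguments; the same round trip can be done by hand
+# with the *tokenizers* library, as sketched here (the values are arbitrary, assuming `tokenizers` is installed).
+import json
+from tokenizers import normalizers
+
+norm = normalizers.BertNormalizer(lowercase=True, strip_accents=None, handle_chinese_chars=True)
+state = json.loads(norm.__getstate__())
+state.pop("type")            # "BertNormalizer"
+state["lowercase"] = False   # rebuild with a different casing, mirroring the check in __init__ above
+rebuilt = normalizers.BertNormalizer(**state)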
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
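+
+# Editor's note: illustrative sketch, not part of the vendored file above. It shows the special-token layout
+# documented in `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences`, assuming the public
+# `distilbert-base-uncased` checkpoint can be downloaded from the Hub.
+from transformers import DistilBertTokenizerFast
+
+tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
+ids_a = tok.convert_tokens_to_ids(tok.tokenize("hello world"))
+ids_b = tok.convert_tokens_to_ids(tok.tokenize("how are you"))
+print(tok.build_inputs_with_special_tokens(ids_a, ids_b))       # [CLS] A [SEP] B [SEP]
+print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))   # 0s over [CLS] A [SEP], 1s over B [SEP]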
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea7f077f928d39527ab5cf9ba4f195a62445bb84
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__init__.py
@@ -0,0 +1,70 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_ernie"] = [
+ "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ErnieForCausalLM",
+ "ErnieForMaskedLM",
+ "ErnieForMultipleChoice",
+ "ErnieForNextSentencePrediction",
+ "ErnieForPreTraining",
+ "ErnieForQuestionAnswering",
+ "ErnieForSequenceClassification",
+ "ErnieForTokenClassification",
+ "ErnieModel",
+ "ErniePreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_ernie import (
+ ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ErnieForCausalLM,
+ ErnieForMaskedLM,
+ ErnieForMultipleChoice,
+ ErnieForNextSentencePrediction,
+ ErnieForPreTraining,
+ ErnieForQuestionAnswering,
+ ErnieForSequenceClassification,
+ ErnieForTokenClassification,
+ ErnieModel,
+ ErniePreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
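+
+# Editor's note: illustrative sketch, not part of the vendored file above. It demonstrates the lazy-import pattern
+# used by this __init__: the package module is replaced by a `_LazyModule`, and the heavy submodules
+# (`configuration_ernie`, `modeling_ernie`) are only imported on first attribute access. Assumes `transformers`
+# and `torch` are installed.
+import importlib
+
+ernie_pkg = importlib.import_module("transformers.models.ernie")
+print(type(ernie_pkg).__name__)     # _LazyModule: nothing heavy has been imported yet
+config_cls = ernie_pkg.ErnieConfig  # first attribute access imports configuration_ernie for real
+print(config_cls.model_type)        # ernie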
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59ee7411863761b766b90a00c8a5303a9932b01e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..415c74a56a79d1bbac0f40f45904c198bf0c4f3f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/__pycache__/modeling_ernie.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py
new file mode 100644
index 0000000000000000000000000000000000000000..7278a74eced5175cb6727518f4f15f140446239a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/configuration_ernie.py
@@ -0,0 +1,172 @@
+# coding=utf-8
+# Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" ERNIE model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "nghuyong/ernie-1.0-base-zh": "https://huggingface.co/nghuyong/ernie-1.0-base-zh/resolve/main/config.json",
+ "nghuyong/ernie-2.0-base-en": "https://huggingface.co/nghuyong/ernie-2.0-base-en/resolve/main/config.json",
+ "nghuyong/ernie-2.0-large-en": "https://huggingface.co/nghuyong/ernie-2.0-large-en/resolve/main/config.json",
+ "nghuyong/ernie-3.0-base-zh": "https://huggingface.co/nghuyong/ernie-3.0-base-zh/resolve/main/config.json",
+ "nghuyong/ernie-3.0-medium-zh": "https://huggingface.co/nghuyong/ernie-3.0-medium-zh/resolve/main/config.json",
+ "nghuyong/ernie-3.0-mini-zh": "https://huggingface.co/nghuyong/ernie-3.0-mini-zh/resolve/main/config.json",
+ "nghuyong/ernie-3.0-micro-zh": "https://huggingface.co/nghuyong/ernie-3.0-micro-zh/resolve/main/config.json",
+ "nghuyong/ernie-3.0-nano-zh": "https://huggingface.co/nghuyong/ernie-3.0-nano-zh/resolve/main/config.json",
+ "nghuyong/ernie-gram-zh": "https://huggingface.co/nghuyong/ernie-gram-zh/resolve/main/config.json",
+ "nghuyong/ernie-health-zh": "https://huggingface.co/nghuyong/ernie-health-zh/resolve/main/config.json",
+}
+
+
+class ErnieConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used to
+ instantiate an ERNIE model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the ERNIE
+ [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the ERNIE model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
+ task_type_vocab_size (`int`, *optional*, defaults to 3):
+ The vocabulary size of the `task_type_ids` for the ERNIE 2.0/ERNIE 3.0 models.
+ use_task_id (`bool`, *optional*, defaults to `False`):
+ Whether or not the model supports `task_type_ids`.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ Padding token id.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ classifier_dropout (`float`, *optional*):
+ The dropout ratio for the classification head.
+
+ Examples:
+
+ ```python
+ >>> from transformers import ErnieConfig, ErnieModel
+
+ >>> # Initializing an ERNIE nghuyong/ernie-3.0-base-zh style configuration
+ >>> configuration = ErnieConfig()
+
+ >>> # Initializing a model (with random weights) from the nghuyong/ernie-3.0-base-zh style configuration
+ >>> model = ErnieModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "ernie"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ task_type_vocab_size=3,
+ use_task_id=False,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ position_embedding_type="absolute",
+ use_cache=True,
+ classifier_dropout=None,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.task_type_vocab_size = task_type_vocab_size
+ self.use_task_id = use_task_id
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+ self.classifier_dropout = classifier_dropout
+
+
+class ErnieOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ return OrderedDict(
+ [
+ ("input_ids", dynamic_axis),
+ ("attention_mask", dynamic_axis),
+ ("token_type_ids", dynamic_axis),
+ ("task_type_ids", dynamic_axis),
+ ]
+ )
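+
+# Editor's note: illustrative sketch, not part of the vendored file above. It shows the ERNIE-specific
+# configuration fields (`use_task_id`, `task_type_vocab_size`) and the extra `task_type_ids` axis that
+# `ErnieOnnxConfig` adds to the ONNX inputs. No checkpoint is downloaded; the values are arbitrary.
+from transformers.models.ernie.configuration_ernie import ErnieConfig, ErnieOnnxConfig
+
+cfg = ErnieConfig(use_task_id=True, task_type_vocab_size=3)
+print(cfg.use_task_id, cfg.task_type_vocab_size)  # True 3
+onnx_cfg = ErnieOnnxConfig.from_model_config(cfg)
+print(list(onnx_cfg.inputs))  # ['input_ids', 'attention_mask', 'token_type_ids', 'task_type_ids']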
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py
new file mode 100644
index 0000000000000000000000000000000000000000..291ab6c54d1e5051ab8bdb3184c22e79fd04e033
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/ernie/modeling_ernie.py
@@ -0,0 +1,1832 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch ERNIE model."""
+
+
+import math
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ NextSentencePredictorOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_ernie import ErnieConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "nghuyong/ernie-1.0-base-zh"
+_CONFIG_FOR_DOC = "ErnieConfig"
+
+
+ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "nghuyong/ernie-1.0-base-zh",
+ "nghuyong/ernie-2.0-base-en",
+ "nghuyong/ernie-2.0-large-en",
+ "nghuyong/ernie-3.0-base-zh",
+ "nghuyong/ernie-3.0-medium-zh",
+ "nghuyong/ernie-3.0-mini-zh",
+ "nghuyong/ernie-3.0-micro-zh",
+ "nghuyong/ernie-3.0-nano-zh",
+ "nghuyong/ernie-gram-zh",
+ "nghuyong/ernie-health-zh",
+ # See all ERNIE models at https://huggingface.co/models?filter=ernie
+]
+
+
+class ErnieEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+ self.use_task_id = config.use_task_id
+ if config.use_task_id:
+ self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ task_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values_length: int = 0,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+ # When token_type_ids is not provided, fall back to the all-zeros buffer registered in the constructor. This is
+ # usually the case when it would be auto-generated anyway; using the registered buffer lets users trace the model
+ # without passing token_type_ids and solves issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+
+ # add `task_type_id` for ERNIE model
+ if self.use_task_id:
+ if task_type_ids is None:
+ task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+ task_type_embeddings = self.task_type_embeddings(task_type_ids)
+ embeddings += task_type_embeddings
+
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
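+
+# Editor's note: illustrative sketch, not part of the vendored file above. It runs `ErnieEmbeddings` directly with
+# a tiny, randomly initialized configuration: the output is the LayerNorm'ed (and dropout'ed) sum of the word,
+# token-type, position and, because `use_task_id=True`, task-type embeddings. The sizes are arbitrary.
+import torch
+from transformers.models.ernie.configuration_ernie import ErnieConfig
+from transformers.models.ernie.modeling_ernie import ErnieEmbeddings
+
+tiny_cfg = ErnieConfig(vocab_size=50, hidden_size=16, max_position_embeddings=32, use_task_id=True)
+emb = ErnieEmbeddings(tiny_cfg)
+input_ids = torch.randint(0, 50, (2, 8))
+out = emb(input_ids=input_ids, task_type_ids=torch.zeros_like(input_ids))
+print(out.shape)  # torch.Size([2, 8, 16])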
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Ernie
+class ErnieSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # If this is cross-attention, save a Tuple(torch.Tensor, torch.Tensor) of all cross-attention key/value states.
+ # Further calls to the cross-attention layer can then reuse them (first "if" case above).
+ # If this is uni-directional (decoder) self-attention, save a Tuple(torch.Tensor, torch.Tensor) of all previous
+ # decoder key/value states. Further calls to uni-directional self-attention can concatenate previous decoder
+ # key/value states with the current projected key/value states (third "elif" case above).
+ # If this is encoder bi-directional self-attention, `past_key_value` is always `None`.
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the ErnieModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
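+
+# Editor's note: illustrative sketch, not part of the vendored file above. It spells out the reshape done by
+# `transpose_for_scores`: a `(batch, seq, hidden)` tensor is split into heads and permuted to
+# `(batch, num_heads, seq, head_size)`, so attention scores can be computed per head with a single matmul.
+import torch
+
+batch, seq, num_heads, head_size = 2, 5, 4, 8
+hidden_states = torch.randn(batch, seq, num_heads * head_size)
+per_head = hidden_states.view(batch, seq, num_heads, head_size).permute(0, 2, 1, 3)
+scores = torch.matmul(per_head, per_head.transpose(-1, -2)) / (head_size ** 0.5)
+print(per_head.shape, scores.shape)  # torch.Size([2, 4, 5, 8]) torch.Size([2, 4, 5, 5])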
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Ernie
+class ErnieSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Ernie
+class ErnieAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = ErnieSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = ErnieSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
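+
+# Editor's note: illustrative sketch, not part of the vendored file above. `prune_heads` above removes the selected
+# heads' rows from the query/key/value projections (and the matching columns of the output dense layer); the usual
+# entry point is `PreTrainedModel.prune_heads`, shown here on a tiny randomly initialized model.
+from transformers import ErnieConfig, ErnieModel
+
+small = ErnieModel(ErnieConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=64))
+small.prune_heads({0: [0, 1]})  # drop heads 0 and 1 of layer 0
+print(small.encoder.layer[0].attention.self.num_attention_heads)  # 2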
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Ernie
+class ErnieIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Ernie
+class ErnieOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Ernie
+class ErnieLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = ErnieAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = ErnieAttention(config, position_embedding_type="absolute")
+ self.intermediate = ErnieIntermediate(config)
+ self.output = ErnieOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
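+
+# Editor's note: illustrative sketch, not part of the vendored file above. `feed_forward_chunk` is wrapped in
+# `apply_chunking_to_forward`, which slices the sequence dimension into chunks of `config.chunk_size_feed_forward`
+# (0 disables chunking) and concatenates the results, trading speed for peak memory while producing the same
+# output. Toy numbers below.
+import torch
+from transformers.pytorch_utils import apply_chunking_to_forward
+
+def toy_ffn(x): return torch.tanh(x) * 2.0
+
+x = torch.randn(1, 6, 4)
+chunked = apply_chunking_to_forward(toy_ffn, 2, 1, x)  # chunk_size=2 along the sequence dim (dim 1)
+print(torch.allclose(toy_ffn(x), chunked))  # True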
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Ernie
+class ErnieEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([ErnieLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
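+
+# Editor's note: illustrative sketch, not part of the vendored file above. As the warning in the loop above says,
+# gradient checkpointing and `use_cache=True` are mutually exclusive; checkpointing is normally switched on through
+# the `PreTrainedModel` helper rather than by setting `gradient_checkpointing` by hand.
+from transformers import ErnieConfig, ErnieModel
+
+enc_model = ErnieModel(ErnieConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64))
+enc_model.gradient_checkpointing_enable()
+print(enc_model.encoder.gradient_checkpointing)  # True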
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Ernie
+class ErniePooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Ernie
+class ErniePredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Ernie
+class ErnieLMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = ErniePredictionHeadTransform(config)
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Ernie
+class ErnieOnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = ErnieLMPredictionHead(config)
+
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->Ernie
+class ErnieOnlyNSPHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, pooled_output):
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return seq_relationship_score
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->Ernie
+class ErniePreTrainingHeads(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = ErnieLMPredictionHead(config)
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, sequence_output, pooled_output):
+ prediction_scores = self.predictions(sequence_output)
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return prediction_scores, seq_relationship_score
+
+
+class ErniePreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ErnieConfig
+ base_model_prefix = "ernie"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+@dataclass
+# Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->Ernie
+class ErnieForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`ErnieForPreTraining`].
+
+ Args:
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
+ (classification) loss.
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ prediction_logits: torch.FloatTensor = None
+ seq_relationship_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+ERNIE_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`ErnieConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ERNIE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ task_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Task type embeddings are special embeddings that represent the characteristics of different tasks, such as
+ the word-aware, structure-aware and semantic-aware pre-training tasks. We assign a `task_type_id` to each
+ task, and the `task_type_id` is in the range `[0, config.task_type_vocab_size-1]`.
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Ernie Model transformer outputting raw hidden-states without any specific head on top.",
+ ERNIE_START_DOCSTRING,
+)
+class ErnieModel(ErniePreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
+ and `add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass.
+ """
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Ernie
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = ErnieEmbeddings(config)
+ self.encoder = ErnieEncoder(config)
+
+ self.pooler = ErniePooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+        # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
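+# --- Editor's note: a minimal usage sketch, not part of the upstream file. ---
+# It shows the base ErnieModel above used as a feature extractor. The checkpoint
+# name is reused from the docstring examples elsewhere in this module; the Chinese
+# sentence is an arbitrary illustration. The ERNIE-specific `task_type_ids` argument
+# may additionally be passed to `forward`; whether it affects the embeddings depends
+# on the checkpoint's configuration.
+#
+#     import torch
+#     from transformers import AutoTokenizer, ErnieModel
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieModel.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#
+#     inputs = tokenizer("百度是一家高科技公司", return_tensors="pt")
+#     with torch.no_grad():
+#         outputs = model(**inputs)
+#     last_hidden_state = outputs.last_hidden_state  # (batch, seq_len, hidden_size)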
+
+@add_start_docstrings(
+ """
+ Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
+ sentence prediction (classification)` head.
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForPreTraining(ErniePreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
+
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.ernie = ErnieModel(config)
+ self.cls = ErniePreTrainingHeads(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=ErnieForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ next_sentence_label: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], ErnieForPreTrainingOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
+            pair (see the `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
+ Used to hide legacy arguments that have been deprecated.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ErnieForPreTraining
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+ >>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> prediction_logits = outputs.prediction_logits
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output, pooled_output = outputs[:2]
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
+
+ total_loss = None
+ if labels is not None and next_sentence_label is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
+ total_loss = masked_lm_loss + next_sentence_loss
+
+ if not return_dict:
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return ErnieForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=prediction_scores,
+ seq_relationship_logits=seq_relationship_score,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """Ernie Model with a `language modeling` head on top for CLM fine-tuning.""", ERNIE_START_DOCSTRING
+)
+class ErnieForCausalLM(ErniePreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
+
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->ErnieForCausalLM,Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ if not config.is_decoder:
+            logger.warning("If you want to use `ErnieForCausalLM` as a standalone, add `is_decoder=True`.")
+
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
+ self.cls = ErnieOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutputWithCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.Tensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            `[-100, 0, ..., config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are
+            ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ lm_loss = None
+ if labels is not None:
+ # we are doing next-token prediction; shift prediction scores and input ids by one
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+ labels = labels[:, 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=lm_loss,
+ logits=prediction_scores,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs
+ ):
+ input_shape = input_ids.shape
+        # if the model is used as a decoder in an encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_shape)
+
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel._reorder_cache
+ def _reorder_cache(self, past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
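+# --- Editor's note: a minimal usage sketch, not part of the upstream file. ---
+# It exercises ErnieForCausalLM above through `generate`, which calls
+# `prepare_inputs_for_generation` (and `_reorder_cache` under beam search) internally.
+# The checkpoint name is reused from the docstring examples in this module, and
+# `is_decoder=True` follows the warning emitted in `__init__`.
+#
+#     from transformers import AutoTokenizer, ErnieForCausalLM
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieForCausalLM.from_pretrained("nghuyong/ernie-1.0-base-zh", is_decoder=True)
+#
+#     inputs = tokenizer("今天天气", return_tensors="pt")
+#     generated_ids = model.generate(**inputs, max_new_tokens=10, use_cache=True)
+#     print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))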
+
+@add_start_docstrings("""Ernie Model with a `language modeling` head on top.""", ERNIE_START_DOCSTRING)
+class ErnieForMaskedLM(ErniePreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]
+
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `ErnieForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
+ self.cls = ErnieOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output="'paris'",
+ expected_loss=0.88,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
+ input_shape = input_ids.shape
+ effective_batch_size = input_shape[0]
+
+ # add a dummy token
+ if self.config.pad_token_id is None:
+ raise ValueError("The PAD token should be defined for generation")
+
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
+ dummy_token = torch.full(
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
+ )
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
+
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
+
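+# --- Editor's note: a minimal usage sketch, not part of the upstream file. ---
+# It queries the masked-LM head above at a single `[MASK]` position. The checkpoint
+# name is reused from the docstring examples in this module; the prompt is an
+# arbitrary illustration.
+#
+#     import torch
+#     from transformers import AutoTokenizer, ErnieForMaskedLM
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieForMaskedLM.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#
+#     inputs = tokenizer(f"中国的首都是{tokenizer.mask_token}。", return_tensors="pt")
+#     with torch.no_grad():
+#         logits = model(**inputs).logits
+#     mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
+#     predicted_token = tokenizer.decode(logits[0, mask_positions].argmax(dim=-1))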
+
+@add_start_docstrings(
+ """Ernie Model with a `next sentence prediction (classification)` head on top.""",
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForNextSentencePrediction(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForNextSentencePrediction.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.ernie = ErnieModel(config)
+ self.cls = ErnieOnlyNSPHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ErnieForNextSentencePrediction
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+ >>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
+
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
+ >>> logits = outputs.logits
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
+ ```
+ """
+
+ if "next_sentence_label" in kwargs:
+ warnings.warn(
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
+ " `labels` instead.",
+ FutureWarning,
+ )
+ labels = kwargs.pop("next_sentence_label")
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ seq_relationship_scores = self.cls(pooled_output)
+
+ next_sentence_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
+
+ if not return_dict:
+ output = (seq_relationship_scores,) + outputs[2:]
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
+
+ return NextSentencePredictorOutput(
+ loss=next_sentence_loss,
+ logits=seq_relationship_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForSequenceClassification(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.ernie = ErnieModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
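+# --- Editor's note: a minimal usage sketch, not part of the upstream file. ---
+# It runs single-label classification with the head above (with integer labels, the
+# `problem_type` logic in `forward` would select cross-entropy). `num_labels=2` and
+# the example sentence are placeholders; the freshly added classifier is randomly
+# initialized, so predictions are only meaningful after fine-tuning.
+#
+#     import torch
+#     from transformers import AutoTokenizer, ErnieForSequenceClassification
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieForSequenceClassification.from_pretrained("nghuyong/ernie-1.0-base-zh", num_labels=2)
+#
+#     inputs = tokenizer("这部电影很好看", return_tensors="pt")
+#     with torch.no_grad():
+#         logits = model(**inputs).logits  # (batch, num_labels)
+#     predicted_class_id = logits.argmax(dim=-1).item()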
+
+@add_start_docstrings(
+ """
+ Ernie Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForMultipleChoice(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.ernie = ErnieModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
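+# --- Editor's note: a minimal usage sketch, not part of the upstream file. ---
+# It illustrates the `(batch_size, num_choices, sequence_length)` layout that
+# `forward` above flattens to `(batch_size * num_choices, sequence_length)`. The
+# prompt/choice pair is a placeholder and the classification head is randomly
+# initialized until fine-tuned.
+#
+#     import torch
+#     from transformers import AutoTokenizer, ErnieForMultipleChoice
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieForMultipleChoice.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#
+#     prompt = "天空是什么颜色的？"
+#     choices = ["蓝色", "咸的"]
+#     encoding = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
+#     inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}  # add batch dim -> (1, num_choices, seq_len)
+#     with torch.no_grad():
+#         logits = model(**inputs).logits  # (1, num_choices)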
+
+@add_start_docstrings(
+ """
+ Ernie Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForTokenClassification(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
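+# --- Editor's note: a minimal usage sketch, not part of the upstream file. ---
+# It produces per-token label predictions with the head above. `num_labels=9` and the
+# sentence are placeholders; the token-classification head is randomly initialized
+# until fine-tuned on a labeled (e.g. NER) dataset.
+#
+#     import torch
+#     from transformers import AutoTokenizer, ErnieForTokenClassification
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieForTokenClassification.from_pretrained("nghuyong/ernie-1.0-base-zh", num_labels=9)
+#
+#     inputs = tokenizer("小明住在北京", return_tensors="pt")
+#     with torch.no_grad():
+#         logits = model(**inputs).logits  # (batch, seq_len, num_labels)
+#     predicted_label_ids = logits.argmax(dim=-1)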
+
+@add_start_docstrings(
+ """
+ Ernie Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ ERNIE_START_DOCSTRING,
+)
+class ErnieForQuestionAnswering(ErniePreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->Ernie,bert->ernie
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.ernie = ErnieModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ task_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ task_type_ids=task_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, splitting can add a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+            # sometimes the start/end positions are outside our model inputs; we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
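+# --- Editor's note: a minimal usage sketch, not part of the upstream file. ---
+# It shows how the start/end logits computed above map back to an answer span. The
+# question/context pair is a placeholder; the QA head is randomly initialized until
+# fine-tuned on an extractive question-answering dataset.
+#
+#     import torch
+#     from transformers import AutoTokenizer, ErnieForQuestionAnswering
+#
+#     tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#     model = ErnieForQuestionAnswering.from_pretrained("nghuyong/ernie-1.0-base-zh")
+#
+#     question, context = "中国的首都是哪里？", "中国的首都是北京。"
+#     inputs = tokenizer(question, context, return_tensors="pt")
+#     with torch.no_grad():
+#         outputs = model(**inputs)
+#     start = outputs.start_logits.argmax(dim=-1).item()
+#     end = outputs.end_logits.argmax(dim=-1).item()
+#     answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])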
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fd5cbf1dc272ec4d9747e2dd1616ed37d5acdbf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__init__.py
@@ -0,0 +1,77 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_fastspeech2_conformer": [
+ "FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "FastSpeech2ConformerConfig",
+ "FastSpeech2ConformerHifiGanConfig",
+ "FastSpeech2ConformerWithHifiGanConfig",
+ ],
+ "tokenization_fastspeech2_conformer": ["FastSpeech2ConformerTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_fastspeech2_conformer"] = [
+ "FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "FastSpeech2ConformerWithHifiGan",
+ "FastSpeech2ConformerHifiGan",
+ "FastSpeech2ConformerModel",
+ "FastSpeech2ConformerPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_fastspeech2_conformer import (
+ FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ FastSpeech2ConformerConfig,
+ FastSpeech2ConformerHifiGanConfig,
+ FastSpeech2ConformerWithHifiGanConfig,
+ )
+ from .tokenization_fastspeech2_conformer import FastSpeech2ConformerTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_fastspeech2_conformer import (
+ FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ FastSpeech2ConformerHifiGan,
+ FastSpeech2ConformerModel,
+ FastSpeech2ConformerPreTrainedModel,
+ FastSpeech2ConformerWithHifiGan,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/configuration_fastspeech2_conformer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/configuration_fastspeech2_conformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eae584a83543cebd68d138fe1dbd121c47f71d8e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/configuration_fastspeech2_conformer.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34067ecddfe8528d53cf7c99652ad3354783397f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_hifigan.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_hifigan.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..af872b0ae3443218b37a533d6992ecf9d84322f3
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/convert_hifigan.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/modeling_fastspeech2_conformer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/modeling_fastspeech2_conformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..216d015093b24f40e35f2053b07cba379290fdae
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/__pycache__/modeling_fastspeech2_conformer.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..46dc10adb2900e3171b34d7203d3e5fc827d7259
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py
@@ -0,0 +1,488 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" FastSpeech2Conformer model configuration"""
+
+from typing import Dict
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "espnet/fastspeech2_conformer": "https://huggingface.co/espnet/fastspeech2_conformer/raw/main/config.json",
+}
+
+FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "espnet/fastspeech2_conformer_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_hifigan/raw/main/config.json",
+}
+
+FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "espnet/fastspeech2_conformer_with_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_with_hifigan/raw/main/config.json",
+}
+
+
+class FastSpeech2ConformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`FastSpeech2ConformerModel`]. It is used to
+ instantiate a FastSpeech2Conformer model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
+ FastSpeech2Conformer [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 384):
+ The dimensionality of the hidden layers.
+ vocab_size (`int`, *optional*, defaults to 78):
+ The size of the vocabulary.
+ num_mel_bins (`int`, *optional*, defaults to 80):
+ The number of mel filters used in the filter bank.
+ encoder_num_attention_heads (`int`, *optional*, defaults to 2):
+ The number of attention heads in the encoder.
+ encoder_layers (`int`, *optional*, defaults to 4):
+ The number of layers in the encoder.
+ encoder_linear_units (`int`, *optional*, defaults to 1536):
+ The number of units in the linear layer of the encoder.
+ decoder_layers (`int`, *optional*, defaults to 4):
+ The number of layers in the decoder.
+ decoder_num_attention_heads (`int`, *optional*, defaults to 2):
+ The number of attention heads in the decoder.
+ decoder_linear_units (`int`, *optional*, defaults to 1536):
+ The number of units in the linear layer of the decoder.
+ speech_decoder_postnet_layers (`int`, *optional*, defaults to 5):
+ The number of layers in the post-net of the speech decoder.
+ speech_decoder_postnet_units (`int`, *optional*, defaults to 256):
+ The number of units in the post-net layers of the speech decoder.
+ speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5):
+ The kernel size in the post-net of the speech decoder.
+ positionwise_conv_kernel_size (`int`, *optional*, defaults to 3):
+ The size of the convolution kernel used in the position-wise layer.
+ encoder_normalize_before (`bool`, *optional*, defaults to `False`):
+ Specifies whether to normalize before encoder layers.
+ decoder_normalize_before (`bool`, *optional*, defaults to `False`):
+ Specifies whether to normalize before decoder layers.
+ encoder_concat_after (`bool`, *optional*, defaults to `False`):
+ Specifies whether to concatenate after encoder layers.
+ decoder_concat_after (`bool`, *optional*, defaults to `False`):
+ Specifies whether to concatenate after decoder layers.
+ reduction_factor (`int`, *optional*, defaults to 1):
+ The factor by which the speech frame rate is reduced.
+ speaking_speed (`float`, *optional*, defaults to 1.0):
+ The speed of the speech produced.
+ use_macaron_style_in_conformer (`bool`, *optional*, defaults to `True`):
+ Specifies whether to use macaron style in the conformer.
+ use_cnn_in_conformer (`bool`, *optional*, defaults to `True`):
+ Specifies whether to use convolutional neural networks in the conformer.
+ encoder_kernel_size (`int`, *optional*, defaults to 7):
+ The kernel size used in the encoder.
+ decoder_kernel_size (`int`, *optional*, defaults to 31):
+ The kernel size used in the decoder.
+ duration_predictor_layers (`int`, *optional*, defaults to 2):
+ The number of layers in the duration predictor.
+ duration_predictor_channels (`int`, *optional*, defaults to 256):
+ The number of channels in the duration predictor.
+ duration_predictor_kernel_size (`int`, *optional*, defaults to 3):
+ The kernel size used in the duration predictor.
+ energy_predictor_layers (`int`, *optional*, defaults to 2):
+ The number of layers in the energy predictor.
+ energy_predictor_channels (`int`, *optional*, defaults to 256):
+ The number of channels in the energy predictor.
+ energy_predictor_kernel_size (`int`, *optional*, defaults to 3):
+ The kernel size used in the energy predictor.
+ energy_predictor_dropout (`float`, *optional*, defaults to 0.5):
+ The dropout rate in the energy predictor.
+ energy_embed_kernel_size (`int`, *optional*, defaults to 1):
+ The kernel size used in the energy embed layer.
+ energy_embed_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout rate in the energy embed layer.
+ stop_gradient_from_energy_predictor (`bool`, *optional*, defaults to `False`):
+ Specifies whether to stop gradients from the energy predictor.
+ pitch_predictor_layers (`int`, *optional*, defaults to 5):
+ The number of layers in the pitch predictor.
+ pitch_predictor_channels (`int`, *optional*, defaults to 256):
+ The number of channels in the pitch predictor.
+ pitch_predictor_kernel_size (`int`, *optional*, defaults to 5):
+ The kernel size used in the pitch predictor.
+ pitch_predictor_dropout (`float`, *optional*, defaults to 0.5):
+ The dropout rate in the pitch predictor.
+ pitch_embed_kernel_size (`int`, *optional*, defaults to 1):
+ The kernel size used in the pitch embed layer.
+ pitch_embed_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout rate in the pitch embed layer.
+ stop_gradient_from_pitch_predictor (`bool`, *optional*, defaults to `True`):
+ Specifies whether to stop gradients from the pitch predictor.
+ encoder_dropout_rate (`float`, *optional*, defaults to 0.2):
+ The dropout rate in the encoder.
+ encoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
+ The positional dropout rate in the encoder.
+ encoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
+ The attention dropout rate in the encoder.
+ decoder_dropout_rate (`float`, *optional*, defaults to 0.2):
+ The dropout rate in the decoder.
+ decoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2):
+ The positional dropout rate in the decoder.
+ decoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2):
+ The attention dropout rate in the decoder.
+ duration_predictor_dropout_rate (`float`, *optional*, defaults to 0.2):
+ The dropout rate in the duration predictor.
+ speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5):
+ The dropout rate in the speech decoder postnet.
+ max_source_positions (`int`, *optional*, defaults to 5000):
+            If `"relative"` position embeddings are used, this defines the maximum source input positions.
+ use_masking (`bool`, *optional*, defaults to `True`):
+ Specifies whether to use masking in the model.
+ use_weighted_masking (`bool`, *optional*, defaults to `False`):
+ Specifies whether to use weighted masking in the model.
+ num_speakers (`int`, *optional*):
+            Number of speakers. If set to > 1, assume that the speaker ids will be provided as the input and use the
+            speaker id embedding layer.
+        num_languages (`int`, *optional*):
+            Number of languages. If set to > 1, assume that the language ids will be provided as the input and use
+            the language id embedding layer.
+ speaker_embed_dim (`int`, *optional*):
+ Speaker embedding dimension. If set to > 0, assume that speaker_embedding will be provided as the input.
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Specifies whether the model is an encoder-decoder.
+
+ Example:
+
+ ```python
+ >>> from transformers import FastSpeech2ConformerModel, FastSpeech2ConformerConfig
+
+ >>> # Initializing a FastSpeech2Conformer style configuration
+ >>> configuration = FastSpeech2ConformerConfig()
+
+ >>> # Initializing a model from the FastSpeech2Conformer style configuration
+ >>> model = FastSpeech2ConformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "fastspeech2_conformer"
+ attribute_map = {"num_hidden_layers": "encoder_layers", "num_attention_heads": "encoder_num_attention_heads"}
+
+ def __init__(
+ self,
+ hidden_size=384,
+ vocab_size=78,
+ num_mel_bins=80,
+ encoder_num_attention_heads=2,
+ encoder_layers=4,
+ encoder_linear_units=1536,
+ decoder_layers=4,
+ decoder_num_attention_heads=2,
+ decoder_linear_units=1536,
+ speech_decoder_postnet_layers=5,
+ speech_decoder_postnet_units=256,
+ speech_decoder_postnet_kernel=5,
+ positionwise_conv_kernel_size=3,
+ encoder_normalize_before=False,
+ decoder_normalize_before=False,
+ encoder_concat_after=False,
+ decoder_concat_after=False,
+ reduction_factor=1,
+ speaking_speed=1.0,
+ use_macaron_style_in_conformer=True,
+ use_cnn_in_conformer=True,
+ encoder_kernel_size=7,
+ decoder_kernel_size=31,
+ duration_predictor_layers=2,
+ duration_predictor_channels=256,
+ duration_predictor_kernel_size=3,
+ energy_predictor_layers=2,
+ energy_predictor_channels=256,
+ energy_predictor_kernel_size=3,
+ energy_predictor_dropout=0.5,
+ energy_embed_kernel_size=1,
+ energy_embed_dropout=0.0,
+ stop_gradient_from_energy_predictor=False,
+ pitch_predictor_layers=5,
+ pitch_predictor_channels=256,
+ pitch_predictor_kernel_size=5,
+ pitch_predictor_dropout=0.5,
+ pitch_embed_kernel_size=1,
+ pitch_embed_dropout=0.0,
+ stop_gradient_from_pitch_predictor=True,
+ encoder_dropout_rate=0.2,
+ encoder_positional_dropout_rate=0.2,
+ encoder_attention_dropout_rate=0.2,
+ decoder_dropout_rate=0.2,
+ decoder_positional_dropout_rate=0.2,
+ decoder_attention_dropout_rate=0.2,
+ duration_predictor_dropout_rate=0.2,
+ speech_decoder_postnet_dropout=0.5,
+ max_source_positions=5000,
+ use_masking=True,
+ use_weighted_masking=False,
+ num_speakers=None,
+ num_languages=None,
+ speaker_embed_dim=None,
+ is_encoder_decoder=True,
+ **kwargs,
+ ):
+ if positionwise_conv_kernel_size % 2 == 0:
+ raise ValueError(
+ f"positionwise_conv_kernel_size must be odd, but got {positionwise_conv_kernel_size} instead."
+ )
+ if encoder_kernel_size % 2 == 0:
+ raise ValueError(f"encoder_kernel_size must be odd, but got {encoder_kernel_size} instead.")
+ if decoder_kernel_size % 2 == 0:
+ raise ValueError(f"decoder_kernel_size must be odd, but got {decoder_kernel_size} instead.")
+ if duration_predictor_kernel_size % 2 == 0:
+ raise ValueError(
+ f"duration_predictor_kernel_size must be odd, but got {duration_predictor_kernel_size} instead."
+ )
+ if energy_predictor_kernel_size % 2 == 0:
+ raise ValueError(
+ f"energy_predictor_kernel_size must be odd, but got {energy_predictor_kernel_size} instead."
+ )
+ if energy_embed_kernel_size % 2 == 0:
+ raise ValueError(f"energy_embed_kernel_size must be odd, but got {energy_embed_kernel_size} instead.")
+ if pitch_predictor_kernel_size % 2 == 0:
+ raise ValueError(
+ f"pitch_predictor_kernel_size must be odd, but got {pitch_predictor_kernel_size} instead."
+ )
+ if pitch_embed_kernel_size % 2 == 0:
+ raise ValueError(f"pitch_embed_kernel_size must be odd, but got {pitch_embed_kernel_size} instead.")
+ if hidden_size % encoder_num_attention_heads != 0:
+ raise ValueError("The hidden_size must be evenly divisible by encoder_num_attention_heads.")
+ if hidden_size % decoder_num_attention_heads != 0:
+ raise ValueError("The hidden_size must be evenly divisible by decoder_num_attention_heads.")
+ if use_masking and use_weighted_masking:
+ raise ValueError("Either use_masking or use_weighted_masking can be True, but not both.")
+
+ self.hidden_size = hidden_size
+ self.vocab_size = vocab_size
+ self.num_mel_bins = num_mel_bins
+ self.encoder_config = {
+ "num_attention_heads": encoder_num_attention_heads,
+ "layers": encoder_layers,
+ "kernel_size": encoder_kernel_size,
+ "attention_dropout_rate": encoder_attention_dropout_rate,
+ "dropout_rate": encoder_dropout_rate,
+ "positional_dropout_rate": encoder_positional_dropout_rate,
+ "linear_units": encoder_linear_units,
+ "normalize_before": encoder_normalize_before,
+ "concat_after": encoder_concat_after,
+ }
+ self.decoder_config = {
+ "num_attention_heads": decoder_num_attention_heads,
+ "layers": decoder_layers,
+ "kernel_size": decoder_kernel_size,
+ "attention_dropout_rate": decoder_attention_dropout_rate,
+ "dropout_rate": decoder_dropout_rate,
+ "positional_dropout_rate": decoder_positional_dropout_rate,
+ "linear_units": decoder_linear_units,
+ "normalize_before": decoder_normalize_before,
+ "concat_after": decoder_concat_after,
+ }
+ self.encoder_num_attention_heads = encoder_num_attention_heads
+ self.encoder_layers = encoder_layers
+ self.duration_predictor_channels = duration_predictor_channels
+ self.duration_predictor_kernel_size = duration_predictor_kernel_size
+ self.duration_predictor_layers = duration_predictor_layers
+ self.energy_embed_dropout = energy_embed_dropout
+ self.energy_embed_kernel_size = energy_embed_kernel_size
+ self.energy_predictor_channels = energy_predictor_channels
+ self.energy_predictor_dropout = energy_predictor_dropout
+ self.energy_predictor_kernel_size = energy_predictor_kernel_size
+ self.energy_predictor_layers = energy_predictor_layers
+ self.pitch_embed_dropout = pitch_embed_dropout
+ self.pitch_embed_kernel_size = pitch_embed_kernel_size
+ self.pitch_predictor_channels = pitch_predictor_channels
+ self.pitch_predictor_dropout = pitch_predictor_dropout
+ self.pitch_predictor_kernel_size = pitch_predictor_kernel_size
+ self.pitch_predictor_layers = pitch_predictor_layers
+ self.positionwise_conv_kernel_size = positionwise_conv_kernel_size
+ self.speech_decoder_postnet_units = speech_decoder_postnet_units
+ self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout
+ self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel
+ self.speech_decoder_postnet_layers = speech_decoder_postnet_layers
+ self.reduction_factor = reduction_factor
+ self.speaking_speed = speaking_speed
+ self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor
+ self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor
+ self.max_source_positions = max_source_positions
+ self.use_cnn_in_conformer = use_cnn_in_conformer
+ self.use_macaron_style_in_conformer = use_macaron_style_in_conformer
+ self.use_masking = use_masking
+ self.use_weighted_masking = use_weighted_masking
+ self.num_speakers = num_speakers
+ self.num_languages = num_languages
+ self.speaker_embed_dim = speaker_embed_dim
+ self.duration_predictor_dropout_rate = duration_predictor_dropout_rate
+ self.is_encoder_decoder = is_encoder_decoder
+
+ super().__init__(
+ is_encoder_decoder=is_encoder_decoder,
+ **kwargs,
+ )
+
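The validation at the top of this `__init__` is user-facing: odd kernel sizes are required for 'SAME' padding, the hidden size must split evenly across the attention heads, and the two masking flags are mutually exclusive. A minimal sketch of how those checks surface, using only the public `FastSpeech2ConformerConfig` export; the printed message follows the `raise` statements above:

```python
from transformers import FastSpeech2ConformerConfig

# The default configuration satisfies every check performed above.
config = FastSpeech2ConformerConfig()
assert config.hidden_size % config.encoder_num_attention_heads == 0

# An even predictor kernel breaks the odd-kernel ('SAME' padding) requirement and is rejected.
try:
    FastSpeech2ConformerConfig(pitch_predictor_kernel_size=4)
except ValueError as err:
    print(err)  # pitch_predictor_kernel_size must be odd, but got 4 instead.
```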
+
+class FastSpeech2ConformerHifiGanConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`FastSpeech2ConformerHifiGan`]. It is used to
+ instantiate a FastSpeech2Conformer HiFi-GAN vocoder model according to the specified arguments, defining the model
+ architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
+ FastSpeech2Conformer
+ [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ model_in_dim (`int`, *optional*, defaults to 80):
+ The number of frequency bins in the input log-mel spectrogram.
+ upsample_initial_channel (`int`, *optional*, defaults to 512):
+ The number of input channels into the upsampling network.
+ upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 2, 2]`):
+ A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The
+ length of *upsample_rates* defines the number of convolutional layers and has to match the length of
+ *upsample_kernel_sizes*.
+ upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[16, 16, 4, 4]`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The
+ length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of
+ *upsample_rates*.
+ resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`):
+ A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field
+ fusion (MRF) module.
+ resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
+ A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the
+ multi-receptive field fusion (MRF) module.
+ initializer_range (`float`, *optional*, defaults to 0.01):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ leaky_relu_slope (`float`, *optional*, defaults to 0.1):
+ The angle of the negative slope used by the leaky ReLU activation.
+ normalize_before (`bool`, *optional*, defaults to `True`):
+ Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance.
+
+ Example:
+
+ ```python
+ >>> from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig
+
+ >>> # Initializing a FastSpeech2ConformerHifiGan configuration
+ >>> configuration = FastSpeech2ConformerHifiGanConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = FastSpeech2ConformerHifiGan(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "hifigan"
+
+ def __init__(
+ self,
+ model_in_dim=80,
+ upsample_initial_channel=512,
+ upsample_rates=[8, 8, 2, 2],
+ upsample_kernel_sizes=[16, 16, 4, 4],
+ resblock_kernel_sizes=[3, 7, 11],
+ resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ initializer_range=0.01,
+ leaky_relu_slope=0.1,
+ normalize_before=True,
+ **kwargs,
+ ):
+ self.model_in_dim = model_in_dim
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_rates = upsample_rates
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.initializer_range = initializer_range
+ self.leaky_relu_slope = leaky_relu_slope
+ self.normalize_before = normalize_before
+ super().__init__(**kwargs)
+
+
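For intuition on the defaults above: each transposed convolution upsamples the mel sequence by its stride, so the product of `upsample_rates` is the number of waveform samples produced per input spectrogram frame. A small sketch using the public config class:

```python
import math

from transformers import FastSpeech2ConformerHifiGanConfig

config = FastSpeech2ConformerHifiGanConfig()

# One mel frame expands by the product of the strides: 8 * 8 * 2 * 2 = 256 samples.
print(math.prod(config.upsample_rates))  # 256

# One kernel size per upsampling layer, as required by the docstring above.
assert len(config.upsample_kernel_sizes) == len(config.upsample_rates)
```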
+class FastSpeech2ConformerWithHifiGanConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`FastSpeech2ConformerWithHifiGan`]. It is used to
+    instantiate a [`FastSpeech2ConformerWithHifiGan`] model according to the specified sub-model configurations,
+ defining the model architecture.
+
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
+ FastSpeech2ConformerModel [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer) and
+ FastSpeech2ConformerHifiGan
+ [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architectures.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+        model_config (`typing.Dict`, *optional*):
+            Configuration dictionary used to build the [`FastSpeech2ConformerConfig`] of the text-to-speech model.
+        vocoder_config (`typing.Dict`, *optional*):
+            Configuration dictionary used to build the [`FastSpeech2ConformerHifiGanConfig`] of the vocoder model.
+
+ Example:
+
+ ```python
+ >>> from transformers import (
+ ... FastSpeech2ConformerConfig,
+ ... FastSpeech2ConformerHifiGanConfig,
+ ... FastSpeech2ConformerWithHifiGanConfig,
+ ... FastSpeech2ConformerWithHifiGan,
+ ... )
+
+ >>> # Initializing FastSpeech2ConformerWithHifiGan sub-modules configurations.
+ >>> model_config = FastSpeech2ConformerConfig()
+ >>> vocoder_config = FastSpeech2ConformerHifiGanConfig()
+
+ >>> # Initializing a FastSpeech2ConformerWithHifiGan module style configuration
+ >>> configuration = FastSpeech2ConformerWithHifiGanConfig(model_config.to_dict(), vocoder_config.to_dict())
+
+ >>> # Initializing a model (with random weights)
+ >>> model = FastSpeech2ConformerWithHifiGan(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```
+ """
+
+ model_type = "fastspeech2_conformer_with_hifigan"
+ is_composition = True
+
+ def __init__(
+ self,
+ model_config: Dict = None,
+ vocoder_config: Dict = None,
+ **kwargs,
+ ):
+ if model_config is None:
+ model_config = {}
+ logger.info("model_config is None. initializing the model with default values.")
+
+ if vocoder_config is None:
+ vocoder_config = {}
+ logger.info("vocoder_config is None. initializing the coarse model with default values.")
+
+ self.model_config = FastSpeech2ConformerConfig(**model_config)
+ self.vocoder_config = FastSpeech2ConformerHifiGanConfig(**vocoder_config)
+
+ super().__init__(**kwargs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb9c432f82292f0a22c276821130f65e30f45e6a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,210 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert FastSpeech2Conformer checkpoint."""
+
+import argparse
+import json
+import re
+from pathlib import Path
+from tempfile import TemporaryDirectory
+
+import torch
+import yaml
+
+from transformers import (
+ FastSpeech2ConformerConfig,
+ FastSpeech2ConformerModel,
+ FastSpeech2ConformerTokenizer,
+ logging,
+)
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
+
+CONFIG_MAPPING = {
+ "adim": "hidden_size",
+ "aheads": "num_attention_heads",
+ "conformer_dec_kernel_size": "decoder_kernel_size",
+ "conformer_enc_kernel_size": "encoder_kernel_size",
+ "decoder_normalize_before": "decoder_normalize_before",
+ "dlayers": "decoder_layers",
+ "dunits": "decoder_linear_units",
+ "duration_predictor_chans": "duration_predictor_channels",
+ "duration_predictor_kernel_size": "duration_predictor_kernel_size",
+ "duration_predictor_layers": "duration_predictor_layers",
+ "elayers": "encoder_layers",
+ "encoder_normalize_before": "encoder_normalize_before",
+ "energy_embed_dropout": "energy_embed_dropout",
+ "energy_embed_kernel_size": "energy_embed_kernel_size",
+ "energy_predictor_chans": "energy_predictor_channels",
+ "energy_predictor_dropout": "energy_predictor_dropout",
+ "energy_predictor_kernel_size": "energy_predictor_kernel_size",
+ "energy_predictor_layers": "energy_predictor_layers",
+ "eunits": "encoder_linear_units",
+ "pitch_embed_dropout": "pitch_embed_dropout",
+ "pitch_embed_kernel_size": "pitch_embed_kernel_size",
+ "pitch_predictor_chans": "pitch_predictor_channels",
+ "pitch_predictor_dropout": "pitch_predictor_dropout",
+ "pitch_predictor_kernel_size": "pitch_predictor_kernel_size",
+ "pitch_predictor_layers": "pitch_predictor_layers",
+ "positionwise_conv_kernel_size": "positionwise_conv_kernel_size",
+ "postnet_chans": "speech_decoder_postnet_units",
+ "postnet_filts": "speech_decoder_postnet_kernel",
+ "postnet_layers": "speech_decoder_postnet_layers",
+ "reduction_factor": "reduction_factor",
+ "stop_gradient_from_energy_predictor": "stop_gradient_from_energy_predictor",
+ "stop_gradient_from_pitch_predictor": "stop_gradient_from_pitch_predictor",
+ "transformer_dec_attn_dropout_rate": "decoder_attention_dropout_rate",
+ "transformer_dec_dropout_rate": "decoder_dropout_rate",
+ "transformer_dec_positional_dropout_rate": "decoder_positional_dropout_rate",
+ "transformer_enc_attn_dropout_rate": "encoder_attention_dropout_rate",
+ "transformer_enc_dropout_rate": "encoder_dropout_rate",
+ "transformer_enc_positional_dropout_rate": "encoder_positional_dropout_rate",
+ "use_cnn_in_conformer": "use_cnn_in_conformer",
+ "use_macaron_style_in_conformer": "use_macaron_style_in_conformer",
+ "use_masking": "use_masking",
+ "use_weighted_masking": "use_weighted_masking",
+ "idim": "input_dim",
+ "odim": "num_mel_bins",
+ "spk_embed_dim": "speaker_embed_dim",
+ "langs": "num_languages",
+ "spks": "num_speakers",
+}
+
+
+def remap_model_yaml_config(yaml_config_path):
+ with Path(yaml_config_path).open("r", encoding="utf-8") as f:
+ args = yaml.safe_load(f)
+ args = argparse.Namespace(**args)
+
+ remapped_config = {}
+
+ model_params = args.tts_conf["text2mel_params"]
+ # espnet_config_key -> hf_config_key, any keys not included are ignored
+ for espnet_config_key, hf_config_key in CONFIG_MAPPING.items():
+ if espnet_config_key in model_params:
+ remapped_config[hf_config_key] = model_params[espnet_config_key]
+
+ return remapped_config, args.g2p, args.token_list
+
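For reference, a hedged sketch of the YAML layout `remap_model_yaml_config` expects: the ESPnet keys live under `tts_conf.text2mel_params` and are renamed through `CONFIG_MAPPING`. The mapping subset and the concrete values below are illustrative only:

```python
import yaml

# Illustrative subset of the CONFIG_MAPPING defined above.
mapping_subset = {"adim": "hidden_size", "aheads": "num_attention_heads", "elayers": "encoder_layers"}

example_yaml = """
g2p: g2p_en_no_space
token_list: ["<blank>", "<unk>", "AH0", "<sos/eos>"]
tts_conf:
  text2mel_params:
    adim: 384
    aheads: 2
    elayers: 4
"""

raw = yaml.safe_load(example_yaml)
text2mel_params = raw["tts_conf"]["text2mel_params"]
remapped = {
    hf_key: text2mel_params[espnet_key]
    for espnet_key, hf_key in mapping_subset.items()
    if espnet_key in text2mel_params
}
print(remapped)  # {'hidden_size': 384, 'num_attention_heads': 2, 'encoder_layers': 4}
```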
+
+def convert_espnet_state_dict_to_hf(state_dict):
+ new_state_dict = {}
+ for key in state_dict:
+ if "tts.generator.text2mel." in key:
+ new_key = key.replace("tts.generator.text2mel.", "")
+ if "postnet" in key:
+ new_key = new_key.replace("postnet.postnet", "speech_decoder_postnet.layers")
+ new_key = new_key.replace(".0.weight", ".conv.weight")
+ new_key = new_key.replace(".1.weight", ".batch_norm.weight")
+ new_key = new_key.replace(".1.bias", ".batch_norm.bias")
+ new_key = new_key.replace(".1.running_mean", ".batch_norm.running_mean")
+ new_key = new_key.replace(".1.running_var", ".batch_norm.running_var")
+ new_key = new_key.replace(".1.num_batches_tracked", ".batch_norm.num_batches_tracked")
+ if "feat_out" in key:
+ if "weight" in key:
+ new_key = "speech_decoder_postnet.feat_out.weight"
+ if "bias" in key:
+ new_key = "speech_decoder_postnet.feat_out.bias"
+ if "encoder.embed.0.weight" in key:
+ new_key = new_key.replace("0.", "")
+ if "w_1" in key:
+ new_key = new_key.replace("w_1", "conv1")
+ if "w_2" in key:
+ new_key = new_key.replace("w_2", "conv2")
+ if "predictor.conv" in key:
+ new_key = new_key.replace(".conv", ".conv_layers")
+ pattern = r"(\d)\.(\d)"
+ replacement = (
+ r"\1.conv" if ("2.weight" not in new_key) and ("2.bias" not in new_key) else r"\1.layer_norm"
+ )
+ new_key = re.sub(pattern, replacement, new_key)
+ if "pitch_embed" in key or "energy_embed" in key:
+ new_key = new_key.replace("0", "conv")
+ if "encoders" in key:
+ new_key = new_key.replace("encoders", "conformer_layers")
+ new_key = new_key.replace("norm_final", "final_layer_norm")
+ new_key = new_key.replace("norm_mha", "self_attn_layer_norm")
+ new_key = new_key.replace("norm_ff_macaron", "ff_macaron_layer_norm")
+ new_key = new_key.replace("norm_ff", "ff_layer_norm")
+ new_key = new_key.replace("norm_conv", "conv_layer_norm")
+ if "lid_emb" in key:
+ new_key = new_key.replace("lid_emb", "language_id_embedding")
+ if "sid_emb" in key:
+ new_key = new_key.replace("sid_emb", "speaker_id_embedding")
+
+ new_state_dict[new_key] = state_dict[key]
+
+ return new_state_dict
+
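Two made-up ESPnet keys run through the renaming rules above, assuming `convert_espnet_state_dict_to_hf` is in scope; the tensor shapes are placeholders and only the key names matter here:

```python
import torch

dummy_state_dict = {
    "tts.generator.text2mel.encoder.encoders.0.norm_mha.weight": torch.zeros(384),
    "tts.generator.text2mel.pitch_embed.0.weight": torch.zeros(384, 1, 9),
}

converted = convert_espnet_state_dict_to_hf(dummy_state_dict)
print(sorted(converted))
# ['encoder.conformer_layers.0.self_attn_layer_norm.weight', 'pitch_embed.conv.weight']
```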
+
+@torch.no_grad()
+def convert_FastSpeech2ConformerModel_checkpoint(
+ checkpoint_path,
+ yaml_config_path,
+ pytorch_dump_folder_path,
+ repo_id=None,
+):
+ model_params, tokenizer_name, vocab = remap_model_yaml_config(yaml_config_path)
+ config = FastSpeech2ConformerConfig(**model_params)
+
+ # Prepare the model
+ model = FastSpeech2ConformerModel(config)
+
+ espnet_checkpoint = torch.load(checkpoint_path)
+ hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint)
+
+ model.load_state_dict(hf_compatible_state_dict)
+
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ # Prepare the tokenizer
+ with TemporaryDirectory() as tempdir:
+ vocab = {token: id for id, token in enumerate(vocab)}
+ vocab_file = Path(tempdir) / "vocab.json"
+ with open(vocab_file, "w") as f:
+ json.dump(vocab, f)
+ should_strip_spaces = "no_space" in tokenizer_name
+ tokenizer = FastSpeech2ConformerTokenizer(str(vocab_file), should_strip_spaces=should_strip_spaces)
+
+ tokenizer.save_pretrained(pytorch_dump_folder_path)
+
+ if repo_id:
+ print("Pushing to the hub...")
+ model.push_to_hub(repo_id)
+ tokenizer.push_to_hub(repo_id)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
+ parser.add_argument(
+ "--yaml_config_path", required=True, default=None, type=str, help="Path to config.yaml of model to convert"
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+ convert_FastSpeech2ConformerModel_checkpoint(
+ args.checkpoint_path,
+ args.yaml_config_path,
+ args.pytorch_dump_folder_path,
+ args.push_to_hub,
+ )
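The conversion can also be driven from Python instead of the CLI; the paths below are placeholders for an ESPnet training run:

```python
convert_FastSpeech2ConformerModel_checkpoint(
    checkpoint_path="exp/tts_train_fastspeech2_conformer/train.loss.ave.pth",  # placeholder path
    yaml_config_path="exp/tts_train_fastspeech2_conformer/config.yaml",        # placeholder path
    pytorch_dump_folder_path="./fastspeech2_conformer_hf",
    repo_id=None,  # set to "user/repo" to push the converted model and tokenizer to the Hub
)
```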
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_hifigan.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_hifigan.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec9f57ce7142d619259555fb89f4f1366947fe71
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_hifigan.py
@@ -0,0 +1,134 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert FastSpeech2Conformer HiFi-GAN checkpoint."""
+
+import argparse
+from pathlib import Path
+
+import torch
+import yaml
+
+from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig, logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
+
+
+def load_weights(checkpoint, hf_model, config):
+ vocoder_key_prefix = "tts.generator.vocoder."
+ checkpoint = {k.replace(vocoder_key_prefix, ""): v for k, v in checkpoint.items() if vocoder_key_prefix in k}
+
+ hf_model.apply_weight_norm()
+
+ hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
+ hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
+ hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
+
+ for i in range(len(config.upsample_rates)):
+ hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
+ hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
+ hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
+
+ for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
+ for j in range(len(config.resblock_dilation_sizes)):
+ hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
+ hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
+ hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
+
+ hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
+ hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
+ hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
+
+ hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
+ hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
+ hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
+
+ hf_model.remove_weight_norm()
+
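The `apply_weight_norm()` / `remove_weight_norm()` bracket above exists because the ESPnet checkpoint stores each convolution as a `weight_g` / `weight_v` pair. A PyTorch-only sketch of that decomposition, independent of the model classes:

```python
from torch import nn

# Weight normalization re-parameterizes weight = g * v / ||v||, exposing `weight_g` and `weight_v`.
conv = nn.utils.weight_norm(nn.Conv1d(80, 512, kernel_size=7))
print(sorted(name for name, _ in conv.named_parameters()))  # ['bias', 'weight_g', 'weight_v']

# Removing the parametrization folds the pair back into a single `weight`, as done after loading.
nn.utils.remove_weight_norm(conv)
print(sorted(name for name, _ in conv.named_parameters()))  # ['bias', 'weight']
```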
+
+def remap_hifigan_yaml_config(yaml_config_path):
+ with Path(yaml_config_path).open("r", encoding="utf-8") as f:
+ args = yaml.safe_load(f)
+ args = argparse.Namespace(**args)
+
+ vocoder_type = args.tts_conf["vocoder_type"]
+ if vocoder_type != "hifigan_generator":
+ raise TypeError(f"Vocoder config must be for `hifigan_generator`, but got {vocoder_type}")
+
+ remapped_dict = {}
+ vocoder_params = args.tts_conf["vocoder_params"]
+
+ # espnet_config_key -> hf_config_key
+ key_mappings = {
+ "channels": "upsample_initial_channel",
+ "in_channels": "model_in_dim",
+ "resblock_dilations": "resblock_dilation_sizes",
+ "resblock_kernel_sizes": "resblock_kernel_sizes",
+ "upsample_kernel_sizes": "upsample_kernel_sizes",
+ "upsample_scales": "upsample_rates",
+ }
+ for espnet_config_key, hf_config_key in key_mappings.items():
+ remapped_dict[hf_config_key] = vocoder_params[espnet_config_key]
+ remapped_dict["sampling_rate"] = args.tts_conf["sampling_rate"]
+ remapped_dict["normalize_before"] = False
+ remapped_dict["leaky_relu_slope"] = vocoder_params["nonlinear_activation_params"]["negative_slope"]
+
+ return remapped_dict
+
+
+@torch.no_grad()
+def convert_hifigan_checkpoint(
+ checkpoint_path,
+ pytorch_dump_folder_path,
+ yaml_config_path=None,
+ repo_id=None,
+):
+ if yaml_config_path is not None:
+ config_kwargs = remap_hifigan_yaml_config(yaml_config_path)
+ config = FastSpeech2ConformerHifiGanConfig(**config_kwargs)
+ else:
+ config = FastSpeech2ConformerHifiGanConfig()
+
+ model = FastSpeech2ConformerHifiGan(config)
+
+ orig_checkpoint = torch.load(checkpoint_path)
+ load_weights(orig_checkpoint, model, config)
+
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ if repo_id:
+ print("Pushing to the hub...")
+ model.push_to_hub(repo_id)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
+ parser.add_argument("--yaml_config_path", default=None, type=str, help="Path to config.yaml of model to convert")
+ parser.add_argument(
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+ convert_hifigan_checkpoint(
+ args.checkpoint_path,
+ args.pytorch_dump_folder_path,
+ args.yaml_config_path,
+ args.push_to_hub,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a780d5cf0b8ea8a69a0bfc7f02796fbaa2b8c5b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py
@@ -0,0 +1,102 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert FastSpeech2Conformer checkpoint."""
+
+import argparse
+
+import torch
+
+from transformers import (
+ FastSpeech2ConformerConfig,
+ FastSpeech2ConformerHifiGan,
+ FastSpeech2ConformerHifiGanConfig,
+ FastSpeech2ConformerModel,
+ FastSpeech2ConformerWithHifiGan,
+ FastSpeech2ConformerWithHifiGanConfig,
+ logging,
+)
+
+from .convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch import (
+ convert_espnet_state_dict_to_hf,
+ remap_model_yaml_config,
+)
+from .convert_hifigan import load_weights, remap_hifigan_yaml_config
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
+
+
+def convert_FastSpeech2ConformerWithHifiGan_checkpoint(
+ checkpoint_path,
+ yaml_config_path,
+ pytorch_dump_folder_path,
+ repo_id=None,
+):
+ # Prepare the model
+ model_params, *_ = remap_model_yaml_config(yaml_config_path)
+ model_config = FastSpeech2ConformerConfig(**model_params)
+
+ model = FastSpeech2ConformerModel(model_config)
+
+ espnet_checkpoint = torch.load(checkpoint_path)
+ hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint)
+ model.load_state_dict(hf_compatible_state_dict)
+
+ # Prepare the vocoder
+ config_kwargs = remap_hifigan_yaml_config(yaml_config_path)
+ vocoder_config = FastSpeech2ConformerHifiGanConfig(**config_kwargs)
+
+ vocoder = FastSpeech2ConformerHifiGan(vocoder_config)
+ load_weights(espnet_checkpoint, vocoder, vocoder_config)
+
+ # Prepare the model + vocoder
+ config = FastSpeech2ConformerWithHifiGanConfig.from_sub_model_configs(model_config, vocoder_config)
+ with_hifigan_model = FastSpeech2ConformerWithHifiGan(config)
+ with_hifigan_model.model = model
+ with_hifigan_model.vocoder = vocoder
+
+ with_hifigan_model.save_pretrained(pytorch_dump_folder_path)
+
+ if repo_id:
+ print("Pushing to the hub...")
+ with_hifigan_model.push_to_hub(repo_id)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
+ parser.add_argument(
+ "--yaml_config_path", required=True, default=None, type=str, help="Path to config.yaml of model to convert"
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ required=True,
+ default=None,
+ type=str,
+ help="Path to the output `FastSpeech2ConformerModel` PyTorch model.",
+ )
+ parser.add_argument(
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+
+ convert_FastSpeech2ConformerWithHifiGan_checkpoint(
+ args.checkpoint_path,
+ args.yaml_config_path,
+ args.pytorch_dump_folder_path,
+ args.push_to_hub,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc57747c59a4be444538cfc07435ff00b8e31b08
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
@@ -0,0 +1,1686 @@
+# coding=utf-8
+# Copyright 2023 The Espnet authors, IMS Toucan authors, and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch FastSpeech2Conformer model."""
+
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from ...modeling_outputs import BaseModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import ModelOutput, add_start_docstrings, logging, replace_return_docstrings
+from .configuration_fastspeech2_conformer import (
+ FastSpeech2ConformerConfig,
+ FastSpeech2ConformerHifiGanConfig,
+ FastSpeech2ConformerWithHifiGanConfig,
+)
+
+
+logger = logging.get_logger(__name__)
+
+FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "espnet/fastspeech2_conformer",
+ # See all FastSpeech2Conformer models at https://huggingface.co/models?filter=fastspeech2_conformer
+]
+
+
+@dataclass
+class FastSpeech2ConformerModelOutput(ModelOutput):
+ """
+ Output type of [`FastSpeech2ConformerModel`].
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Spectrogram generation loss.
+ spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`):
+ The predicted spectrogram.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
+ Outputs of the duration predictor.
+ pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
+ Outputs of the pitch predictor.
+ energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
+ Outputs of the energy predictor.
+
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ spectrogram: torch.FloatTensor = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ duration_outputs: torch.LongTensor = None
+ pitch_outputs: torch.FloatTensor = None
+ energy_outputs: torch.FloatTensor = None
+
+
+@dataclass
+class FastSpeech2ConformerWithHifiGanOutput(FastSpeech2ConformerModelOutput):
+ """
+ Output type of [`FastSpeech2ConformerWithHifiGan`].
+
+ Args:
+ waveform (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
+ Speech output as a result of passing the predicted mel spectrogram through the vocoder.
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Spectrogram generation loss.
+ spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`):
+ The predicted spectrogram.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
+ Outputs of the duration predictor.
+ pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
+ Outputs of the pitch predictor.
+ energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
+ Outputs of the energy predictor.
+ """
+
+ waveform: torch.FloatTensor = None
+
+
+_CONFIG_FOR_DOC = "FastSpeech2ConformerConfig"
+
+FASTSPEECH2_CONFORMER_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`FastSpeech2ConformerConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+HIFIGAN_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`FastSpeech2ConformerConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`FastSpeech2ConformerWithHifiGanConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+def length_regulator(encoded_embeddings, duration_labels, speaking_speed=1.0):
+ """
+ Length regulator for feed-forward Transformer.
+
+ This is the length regulator module described in `FastSpeech: Fast, Robust and Controllable Text to Speech`
+ https://arxiv.org/pdf/1905.09263.pdf. The length regulator expands char or phoneme-level embedding features to
+ frame-level by repeating each feature based on the corresponding predicted durations.
+
+ Args:
+ encoded_embeddings (`torch.Tensor` of shape `(batch_size, max_text_length, embedding_dim)`):
+ Batch of sequences of char or phoneme embeddings.
+ duration_labels (`torch.LongTensor` of shape `(batch_size, time)`):
+ Batch of durations of each frame.
+ speaking_speed (`float`, *optional*, defaults to 1.0):
+ Value to control speed of speech.
+
+ Returns:
+ `torch.Tensor`:
+ Replicated input tensor based on durations (batch_size, time*, embedding_dim).
+ """
+
+ if speaking_speed <= 0:
+ raise ValueError("`speaking_speed` must be greater than 0.")
+ elif speaking_speed != 1.0:
+ duration_labels = torch.round(duration_labels.float() * speaking_speed).long()
+
+ if duration_labels.sum() == 0:
+ duration_labels[duration_labels.sum(dim=1).eq(0)] = 1
+
+ # Calculate the maximum length needed
+ max_len = torch.sum(duration_labels, dim=1).max()
+
+ # Create a padded tensor to hold the results
+ hidden_states = torch.zeros(
+ (encoded_embeddings.size(0), max_len, encoded_embeddings.size(2)),
+ dtype=torch.float,
+ device=encoded_embeddings.device,
+ )
+
+ # Loop through the batch and fill in the data
+ for i, (encoded_embedding, target_duration) in enumerate(zip(encoded_embeddings, duration_labels)):
+ repeated = torch.repeat_interleave(encoded_embedding, target_duration, dim=0)
+ hidden_states[i, : repeated.size(0)] = repeated
+
+ return hidden_states
+
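A tiny worked example of the regulator, assuming the `length_regulator` defined above is in scope: two phoneme embeddings with durations 2 and 3 are repeated into a five-frame sequence.

```python
import torch

encoded = torch.tensor([[[1.0, 1.0], [2.0, 2.0]]])  # (batch=1, text_len=2, embedding_dim=2)
durations = torch.tensor([[2, 3]])                  # (batch=1, text_len=2)

frames = length_regulator(encoded, durations)
print(frames.shape)     # torch.Size([1, 5, 2])
print(frames[0, :, 0])  # tensor([1., 1., 2., 2., 2.])
```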
+
+class FastSpeech2ConformerDurationPredictor(nn.Module):
+ """
+ Duration predictor module.
+
+    This is the duration predictor module described in the paper 'FastSpeech: Fast, Robust and Controllable Text to
+    Speech' (https://arxiv.org/pdf/1905.09263.pdf). It predicts the duration of each frame in the log domain from the
+    hidden embeddings of the encoder.
+
+    Note:
+        The calculation domain of the outputs differs between training and inference. During training, durations are
+        predicted in the log domain; during inference, they are converted to the linear domain and rounded.
+
+ """
+
+ def __init__(self, config: FastSpeech2ConformerConfig):
+ super().__init__()
+
+ self.conv_layers = nn.ModuleList()
+ self.log_domain_offset = 1.0
+
+ for layer_idx in range(config.duration_predictor_layers):
+ num_chans = config.duration_predictor_channels
+ input_channels = config.hidden_size if layer_idx == 0 else num_chans
+ layer = FastSpeech2ConformerPredictorLayer(
+ input_channels,
+ num_chans,
+ config.duration_predictor_kernel_size,
+ config.duration_predictor_dropout_rate,
+ )
+ self.conv_layers.append(layer)
+ self.linear = nn.Linear(config.duration_predictor_channels, 1)
+
+ def forward(self, encoder_hidden_states):
+ """
+ Args:
+            encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
+                Batch of input sequences.
+
+ Returns:
+ `torch.Tensor`: Batch of predicted durations in log domain `(batch_size, max_text_length)`.
+
+ """
+ # (batch_size, input_dim, max_text_length)
+ hidden_states = encoder_hidden_states.transpose(1, -1)
+ for layer in self.conv_layers:
+ hidden_states = layer(hidden_states)
+
+ # NOTE: calculate in log domain, (batch_size, max_text_length)
+ hidden_states = self.linear(hidden_states.transpose(1, -1)).squeeze(-1)
+
+ if not self.training:
+ # NOTE: calculate in linear domain
+ hidden_states = torch.clamp(torch.round(hidden_states.exp() - self.log_domain_offset), min=0).long()
+
+ return hidden_states
+
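A short sketch of the train/inference behaviour described in the class note, assuming the classes defined in this module are in scope; the hidden states are random and only the output dtype is of interest:

```python
import torch

from transformers import FastSpeech2ConformerConfig

config = FastSpeech2ConformerConfig()
predictor = FastSpeech2ConformerDurationPredictor(config)
hidden = torch.randn(1, 7, config.hidden_size)

predictor.train()
print(predictor(hidden).dtype)  # torch.float32 -- log-domain durations

predictor.eval()
print(predictor(hidden).dtype)  # torch.int64 -- rounded linear-domain durations, clamped at 0
```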
+
+# Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5BatchNormConvLayer
+class FastSpeech2ConformerBatchNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+
+ if layer_id == 0:
+ in_conv_dim = config.num_mel_bins
+ else:
+ in_conv_dim = config.speech_decoder_postnet_units
+
+ if layer_id == config.speech_decoder_postnet_layers - 1:
+ out_conv_dim = config.num_mel_bins
+ else:
+ out_conv_dim = config.speech_decoder_postnet_units
+
+ self.conv = nn.Conv1d(
+ in_conv_dim,
+ out_conv_dim,
+ kernel_size=config.speech_decoder_postnet_kernel,
+ stride=1,
+ padding=(config.speech_decoder_postnet_kernel - 1) // 2,
+ bias=False,
+ )
+ self.batch_norm = nn.BatchNorm1d(out_conv_dim)
+
+ if layer_id < config.speech_decoder_postnet_layers - 1:
+ self.activation = nn.Tanh()
+ else:
+ self.activation = None
+
+ self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.batch_norm(hidden_states)
+ if self.activation is not None:
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class FastSpeech2ConformerSpeechDecoderPostnet(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor)
+ self.layers = nn.ModuleList(
+ [FastSpeech2ConformerBatchNormConvLayer(config, i) for i in range(config.speech_decoder_postnet_layers)]
+ )
+
+ def forward(self, hidden_states: torch.Tensor):
+ outputs_before_postnet = self.feat_out(hidden_states).view(hidden_states.size(0), -1, self.config.num_mel_bins)
+ layer_output = outputs_before_postnet.transpose(1, 2)
+ for layer in self.layers:
+ layer_output = layer(layer_output)
+ outputs_after_postnet = outputs_before_postnet + layer_output.transpose(1, 2)
+ return outputs_before_postnet, outputs_after_postnet
+
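How `reduction_factor` plays out above: `feat_out` predicts `reduction_factor` spectrogram frames per decoder step and the `view` unfolds them along the time axis. A sketch, assuming the classes defined in this module are in scope:

```python
import torch

from transformers import FastSpeech2ConformerConfig

config = FastSpeech2ConformerConfig(reduction_factor=2)
postnet = FastSpeech2ConformerSpeechDecoderPostnet(config)

decoder_states = torch.randn(1, 10, config.hidden_size)
before, after = postnet(decoder_states)
print(before.shape, after.shape)  # both (1, 20, config.num_mel_bins): 10 decoder steps * reduction_factor 2
```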
+
+class FastSpeech2ConformerPredictorLayer(nn.Module):
+ def __init__(self, input_channels, num_chans, kernel_size, dropout_rate):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ input_channels,
+ num_chans,
+ kernel_size,
+ stride=1,
+ padding=(kernel_size - 1) // 2,
+ )
+ self.activation = nn.ReLU()
+ self.layer_norm = nn.LayerNorm(num_chans)
+ self.dropout = nn.Dropout(dropout_rate)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ # Perform layer norm on dimension 1
+ hidden_states = hidden_states.transpose(1, -1)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states.transpose(1, -1)
+
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class FastSpeech2ConformerVariancePredictor(nn.Module):
+ def __init__(
+ self,
+ config: FastSpeech2ConformerConfig,
+ num_layers=2,
+ num_chans=384,
+ kernel_size=3,
+ dropout_rate=0.5,
+ ):
+ """
+        Initialize variance predictor module.
+
+        Args:
+            config ([`FastSpeech2ConformerConfig`]): Model configuration, which provides the input dimension.
+ num_layers (`int`, *optional*, defaults to 2): Number of convolutional layers.
+ num_chans (`int`, *optional*, defaults to 384): Number of channels of convolutional layers.
+ kernel_size (`int`, *optional*, defaults to 3): Kernel size of convolutional layers.
+ dropout_rate (`float`, *optional*, defaults to 0.5): Dropout rate.
+ """
+ super().__init__()
+ self.conv_layers = nn.ModuleList()
+ for idx in range(num_layers):
+ input_channels = config.hidden_size if idx == 0 else num_chans
+ layer = FastSpeech2ConformerPredictorLayer(input_channels, num_chans, kernel_size, dropout_rate)
+ self.conv_layers.append(layer)
+ self.linear = nn.Linear(num_chans, 1)
+
+ def forward(self, encoder_hidden_states, padding_masks=None):
+ """
+ Calculate forward propagation.
+
+ Args:
+ encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
+ Batch of input sequences.
+ padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
+ Batch of masks indicating padded part.
+
+ Returns:
+ Tensor: Batch of predicted sequences `(batch_size, max_text_length, 1)`.
+ """
+ # (batch_size, input_dim, max_text_length)
+ hidden_states = encoder_hidden_states.transpose(1, -1)
+ for layer in self.conv_layers:
+ hidden_states = layer(hidden_states)
+
+ hidden_states = self.linear(hidden_states.transpose(1, 2))
+
+ if padding_masks is not None:
+ hidden_states = hidden_states.masked_fill(padding_masks, 0.0)
+
+ return hidden_states
+
+
+class FastSpeech2ConformerVarianceEmbedding(nn.Module):
+ def __init__(
+ self,
+ in_channels=1,
+ out_channels=384,
+ kernel_size=1,
+ padding=0,
+ dropout_rate=0.0,
+ ):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ padding=padding,
+ )
+ self.dropout = nn.Dropout(dropout_rate)
+
+ def forward(self, hidden_states):
+ hidden_states = hidden_states.transpose(1, 2)
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = hidden_states.transpose(1, 2)
+ return hidden_states
+
+
+class FastSpeech2ConformerAttention(nn.Module):
+ """
+ Multi-Head attention layer with relative position encoding. Details can be found in
+ https://github.com/espnet/espnet/pull/2816. Paper: https://arxiv.org/abs/1901.02860.
+ """
+
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
+ """Construct an FastSpeech2ConformerAttention object."""
+ super().__init__()
+ # We assume d_v always equals dim_key
+ self.num_heads = module_config["num_attention_heads"]
+ self.hidden_size = config.hidden_size
+ self.dim_key = self.hidden_size // self.num_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.linear_q = nn.Linear(self.hidden_size, self.hidden_size)
+ self.linear_k = nn.Linear(self.hidden_size, self.hidden_size)
+ self.linear_v = nn.Linear(self.hidden_size, self.hidden_size)
+ self.linear_out = nn.Linear(self.hidden_size, self.hidden_size)
+ self.dropout = nn.Dropout(p=module_config["attention_dropout_rate"])
+
+ # linear transformation for positional encoding
+ self.linear_pos = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+ # these two learnable bias are used in matrix c and matrix d
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
+ self.pos_bias_u = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
+ self.pos_bias_v = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim))
+
+ def shift_relative_position_tensor(self, pos_tensor):
+ """
+ Args:
+ pos_tensor (torch.Tensor of shape (batch_size, head, time1, 2*time1-1)): Input tensor.
+ """
+ zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), device=pos_tensor.device, dtype=pos_tensor.dtype)
+ pos_tensor_padded = torch.cat([zero_pad, pos_tensor], dim=-1)
+
+ pos_tensor_padded = pos_tensor_padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2))
+ # only keep the positions from 0 to time2
+ pos_tensor = pos_tensor_padded[:, :, 1:].view_as(pos_tensor)[:, :, :, : pos_tensor.size(-1) // 2 + 1]
+
+ return pos_tensor
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ pos_emb: Optional[torch.Tensor] = None,
+ output_attentions: Optional[torch.Tensor] = False,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Compute 'Scaled Dot Product Attention' with rel. positional encoding.
+
+ Args:
+ hidden_states (`torch.Tensor` of shape `(batch, time2, size)`): Values of the hidden states
+ attention_mask (`torch.Tensor` of shape `(batch, time1, time2)`): Mask tensor.
+ pos_emb (`torch.Tensor` of shape `(batch, 2*time1-1, size)`): Positional embedding tensor.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ Returns:
+ `torch.Tensor`: Output tensor of shape `(batch, time1, d_model)`.
+ """
+ bsz, q_len, _ = hidden_states.size()
+ query_states = self.linear_q(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
+ key_states = self.linear_k(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
+ value_states = self.linear_v(hidden_states).view(bsz, -1, self.num_heads, self.head_dim)
+
+ bsz_pos = pos_emb.size(0)
+ pos_encoding = self.linear_pos(pos_emb).view(bsz_pos, -1, self.num_heads, self.head_dim)
+
+ # (batch_size, head, time1, dim_key)
+ query_with_bias_u = (query_states + self.pos_bias_u).transpose(1, 2)
+ # (batch_size, head, time1, dim_key)
+ query_with_bias_v = (query_states + self.pos_bias_v).transpose(1, 2)
+
+ # compute attention score
+ # first compute matrix a and matrix c
+ # as described in https://arxiv.org/abs/1901.02860 Section 3.3
+ # (batch_size, head, time1, time2)
+ matrix_ac = torch.matmul(query_with_bias_u, key_states.permute(0, 2, 3, 1))
+
+ # compute matrix b and matrix d
+ # (batch_size, head, time1, 2*time1-1)
+ matrix_bd = torch.matmul(query_with_bias_v, pos_encoding.permute(0, 2, 3, 1))
+ matrix_bd = self.shift_relative_position_tensor(matrix_bd)
+
+ # (batch_size, head, time1, time2)
+ scores = (matrix_ac + matrix_bd) / math.sqrt(self.dim_key)
+
+ # Forward attention
+ if attention_mask is not None:
+ expected_size = (bsz, 1, q_len)
+ if attention_mask.size() != expected_size:
+ raise ValueError(f"Attention mask should be of size {expected_size}, but is {attention_mask.size()}")
+ attention_mask = attention_mask.unsqueeze(1).eq(0)
+ min_value = float(torch.finfo(scores.dtype).min)
+ scores = scores.masked_fill(attention_mask, min_value)
+ attn_weights = torch.softmax(scores, dim=-1).masked_fill(attention_mask, 0.0)
+ else:
+ attn_weights = torch.softmax(scores, dim=-1)
+
+ attn_weights = self.dropout(attn_weights)
+ attn_output = torch.matmul(attn_weights, value_states.transpose(1, 2))
+ attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, -1)
+
+ attn_output = self.linear_out(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights
+
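The `shift_relative_position_tensor` step above is the Transformer-XL relative-shift trick: it re-indexes the position scores so that entry `(i, j)` ends up holding the score for relative position `i - j`. A standalone copy on a toy two-step sequence, where the last axis initially indexes relative positions `[+1, 0, -1]`:

```python
import torch


def rel_shift(pos_tensor):
    # Same operations as shift_relative_position_tensor above, written as a free function.
    zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), dtype=pos_tensor.dtype)
    padded = torch.cat([zero_pad, pos_tensor], dim=-1)
    padded = padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2))
    return padded[:, :, 1:].view_as(pos_tensor)[:, :, :, : pos_tensor.size(-1) // 2 + 1]


scores = torch.tensor([[1.0, 0.0, -1.0], [1.0, 0.0, -1.0]]).view(1, 1, 2, 3)
print(rel_shift(scores).squeeze())
# tensor([[ 0., -1.],
#         [ 1.,  0.]])
```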
+
+class FastSpeech2ConformerConvolutionModule(nn.Module):
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
+ super().__init__()
+ # kernel_size should be an odd number for 'SAME' padding
+ channels = config.hidden_size
+ kernel_size = module_config["kernel_size"]
+ self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=True)
+ self.depthwise_conv = nn.Conv1d(
+ channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias=True
+ )
+ self.norm = nn.BatchNorm1d(channels)
+ self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=True)
+
+ def forward(self, hidden_states):
+ """
+ Compute convolution module.
+
+ Args:
+ hidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor.
+
+ Returns:
+ `torch.Tensor`: Output tensor of shape `(batch, time, channels)`.
+
+ """
+ # exchange the temporal dimension and the feature dimension
+ hidden_states = hidden_states.transpose(1, 2)
+
+ # GLU mechanism, (batch_size, 2*channel, dim)
+ hidden_states = self.pointwise_conv1(hidden_states)
+ # (batch_size, channel, dim)
+ hidden_states = nn.functional.glu(hidden_states, dim=1)
+
+ # 1D Depthwise Conv
+ hidden_states = self.depthwise_conv(hidden_states)
+ hidden_states = self.norm(hidden_states)
+
+ hidden_states = hidden_states * torch.sigmoid(hidden_states)
+
+ hidden_states = self.pointwise_conv2(hidden_states)
+
+ return hidden_states.transpose(1, 2)
+
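A quick shape check for the convolution module, assuming the classes defined in this module are in scope: the first pointwise convolution doubles the channels, the GLU halves them again, so the `(batch, time, channels)` shape is preserved.

```python
import torch

from transformers import FastSpeech2ConformerConfig

config = FastSpeech2ConformerConfig()
conv_module = FastSpeech2ConformerConvolutionModule(config, config.encoder_config)

hidden_states = torch.randn(2, 10, config.hidden_size)
print(conv_module(hidden_states).shape)  # (2, 10, config.hidden_size)
```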
+
+class FastSpeech2ConformerEncoderLayer(nn.Module):
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
+ super().__init__()
+
+ # self-attention module definition
+ self.self_attn = FastSpeech2ConformerAttention(config, module_config)
+
+ # feed-forward module definition
+ self.feed_forward = FastSpeech2ConformerMultiLayeredConv1d(config, module_config)
+
+ self.macaron_style = config.use_macaron_style_in_conformer
+ if self.macaron_style:
+ self.feed_forward_macaron = FastSpeech2ConformerMultiLayeredConv1d(config, module_config)
+ self.ff_macaron_layer_norm = nn.LayerNorm(config.hidden_size)
+ self.ff_scale = 0.5
+ else:
+ self.ff_scale = 1.0
+
+ # convolution module definition
+ self.use_cnn_module = config.use_cnn_in_conformer
+ if self.use_cnn_module:
+ self.conv_module = FastSpeech2ConformerConvolutionModule(config, module_config)
+ self.conv_layer_norm = nn.LayerNorm(config.hidden_size)
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size)
+
+ self.ff_layer_norm = nn.LayerNorm(config.hidden_size)
+
+ self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size)
+
+ self.dropout = nn.Dropout(module_config["dropout_rate"])
+ self.size = config.hidden_size
+ self.normalize_before = module_config["normalize_before"]
+ self.concat_after = module_config["concat_after"]
+ if self.concat_after:
+ self.concat_linear = nn.Linear(config.hidden_size + config.hidden_size, config.hidden_size)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ pos_emb: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[torch.Tensor] = False,
+ ):
+ """
+ Compute encoded features.
+
+ Args:
+ hidden_states (`torch.Tensor` of shape `(batch, time, size)`): Input tensor.
+ pos_emb (`torch.Tensor` of shape `(1, time, size)`): Positional embeddings tensor.
+ attention_mask (`torch.Tensor` of shape `(batch, time)`): Attention mask tensor for the input.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ Returns:
+ `torch.Tensor`: Output tensor of shape `(batch, time, size)`.
+
+ """
+ # whether to use macaron style
+ if self.macaron_style:
+ residual = hidden_states
+ if self.normalize_before:
+ hidden_states = self.ff_macaron_layer_norm(hidden_states)
+ hidden_states = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(hidden_states))
+ if not self.normalize_before:
+ hidden_states = self.ff_macaron_layer_norm(hidden_states)
+
+ # multi-headed self-attention module
+ residual = hidden_states
+ if self.normalize_before:
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ attention_output, attention_scores = self.self_attn(
+ hidden_states, attention_mask=attention_mask, pos_emb=pos_emb, output_attentions=output_attentions
+ )
+
+ if self.concat_after:
+ x_concat = torch.cat((hidden_states, attention_output), dim=-1)
+ hidden_states = self.concat_linear(x_concat)
+ hidden_states = residual + hidden_states
+ else:
+ hidden_states = self.dropout(attention_output)
+ hidden_states = residual + hidden_states
+ if not self.normalize_before:
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # convolution module
+ if self.use_cnn_module:
+ residual = hidden_states
+ if self.normalize_before:
+ hidden_states = self.conv_layer_norm(hidden_states)
+ hidden_states = self.conv_module(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = residual + hidden_states
+ if not self.normalize_before:
+ hidden_states = self.conv_layer_norm(hidden_states)
+
+ # feed forward module
+ residual = hidden_states
+ if self.normalize_before:
+ hidden_states = self.ff_layer_norm(hidden_states)
+ hidden_states = self.feed_forward(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = residual + self.ff_scale * hidden_states
+ if not self.normalize_before:
+ hidden_states = self.ff_layer_norm(hidden_states)
+
+ if self.conv_module is not None:
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attention_scores,)
+
+ return outputs
+
+
+class FastSpeech2ConformerMultiLayeredConv1d(nn.Module):
+ """
+ Multi-layered conv1d for Transformer block.
+
+ This is a module of multi-layered conv1d designed to replace positionwise feed-forward network in Transformer
+ block, which is introduced in 'FastSpeech: Fast, Robust and Controllable Text to Speech'
+ https://arxiv.org/pdf/1905.09263.pdf
+ """
+
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
+ """
+ Initialize FastSpeech2ConformerMultiLayeredConv1d module.
+
+        Args:
+            config ([`FastSpeech2ConformerConfig`]): Model configuration, providing the input channels and kernel size.
+            module_config (`dict`): Encoder or decoder module configuration, providing the hidden units and dropout rate.
+ """
+ super().__init__()
+ input_channels = config.hidden_size
+ hidden_channels = module_config["linear_units"]
+ kernel_size = config.positionwise_conv_kernel_size
+ self.conv1 = nn.Conv1d(input_channels, hidden_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)
+ self.conv2 = nn.Conv1d(hidden_channels, input_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)
+ self.dropout = nn.Dropout(module_config["dropout_rate"])
+
+ def forward(self, hidden_states):
+ """
+ Calculate forward propagation.
+
+ Args:
+ hidden_states (torch.Tensor): Batch of input tensors (batch_size, time, input_channels).
+
+ Returns:
+ torch.Tensor: Batch of output tensors (batch_size, time, input_channels).
+ """
+ hidden_states = hidden_states.transpose(-1, 1)
+ hidden_states = self.conv1(hidden_states)
+ hidden_states = torch.relu(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.conv2(hidden_states)
+ hidden_states = hidden_states.transpose(-1, 1)
+ return hidden_states
+
+
+class FastSpeech2ConformerRelPositionalEncoding(nn.Module):
+ """
+ Relative positional encoding module (new implementation). Details can be found in
+ https://github.com/espnet/espnet/pull/2816. See Appendix B in https://arxiv.org/abs/1901.02860.
+
+ Args:
+ config (`FastSpeech2ConformerConfig`):
+ FastSpeech2ConformerConfig instance.
+ module_config (`dict`):
+ Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.
+ """
+
+ def __init__(self, config: FastSpeech2ConformerConfig, module_config):
+ """
+ Construct a PositionalEncoding object.
+ """
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.input_scale = math.sqrt(self.embed_dim)
+ self.dropout = nn.Dropout(p=module_config["positional_dropout_rate"])
+ self.pos_enc = None
+ self.max_len = 5000
+ self.extend_pos_enc(torch.tensor(0.0).expand(1, self.max_len))
+
+ def extend_pos_enc(self, x):
+ """Reset the positional encodings."""
+ if self.pos_enc is not None:
+ # self.pos_enc contains both positive and negative parts
+ # the length of self.pos_enc is 2 * input_len - 1
+ if self.pos_enc.size(1) >= x.size(1) * 2 - 1:
+ if self.pos_enc.dtype != x.dtype or self.pos_enc.device != x.device:
+ self.pos_enc = self.pos_enc.to(dtype=x.dtype, device=x.device)
+ return
+ # Suppose `i` is the position of the query vector and `j` the position of the key vector. We use positive
+ # relative positions when keys are to the left (i>j) and negative relative positions otherwise (i<j).
+
+ self.multilingual_model = config.num_languages is not None and config.num_languages > 1
+ if self.multilingual_model:
+ self.language_id_embedding = torch.nn.Embedding(config.num_languages, self.hidden_size)
+
+ self.multispeaker_model = config.num_speakers is not None and config.num_speakers > 1
+ if self.multispeaker_model:
+ self.speaker_id_embedding = torch.nn.Embedding(config.num_speakers, config.hidden_size)
+
+ self.speaker_embed_dim = config.speaker_embed_dim
+ if self.speaker_embed_dim:
+ self.projection = nn.Linear(config.hidden_size + self.speaker_embed_dim, config.hidden_size)
+
+ self.encoder = FastSpeech2ConformerEncoder(config, config.encoder_config, use_encoder_input_layer=True)
+
+ self.duration_predictor = FastSpeech2ConformerDurationPredictor(config)
+
+ self.pitch_predictor = FastSpeech2ConformerVariancePredictor(
+ config,
+ num_layers=config.pitch_predictor_layers,
+ num_chans=config.pitch_predictor_channels,
+ kernel_size=config.pitch_predictor_kernel_size,
+ dropout_rate=config.pitch_predictor_dropout,
+ )
+ # continuous pitch + FastPitch style avg
+ self.pitch_embed = FastSpeech2ConformerVarianceEmbedding(
+ out_channels=self.hidden_size,
+ kernel_size=config.pitch_embed_kernel_size,
+ padding=(config.pitch_embed_kernel_size - 1) // 2,
+ dropout_rate=config.pitch_embed_dropout,
+ )
+
+ self.energy_predictor = FastSpeech2ConformerVariancePredictor(
+ config,
+ num_layers=config.energy_predictor_layers,
+ num_chans=config.energy_predictor_channels,
+ kernel_size=config.energy_predictor_kernel_size,
+ dropout_rate=config.energy_predictor_dropout,
+ )
+ # continuous energy + FastPitch style avg
+ self.energy_embed = FastSpeech2ConformerVarianceEmbedding(
+ out_channels=self.hidden_size,
+ kernel_size=config.energy_embed_kernel_size,
+ padding=(config.energy_embed_kernel_size - 1) // 2,
+ dropout_rate=config.energy_embed_dropout,
+ )
+
+ # The decoder is an encoder
+ self.decoder = FastSpeech2ConformerEncoder(config, config.decoder_config, use_encoder_input_layer=False)
+
+ self.speech_decoder_postnet = FastSpeech2ConformerSpeechDecoderPostnet(config)
+
+ self.criterion = FastSpeech2ConformerLoss(config)
+
+ self.post_init()
+
+ @replace_return_docstrings(output_type=FastSpeech2ConformerModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ spectrogram_labels: Optional[torch.FloatTensor] = None,
+ duration_labels: Optional[torch.LongTensor] = None,
+ pitch_labels: Optional[torch.FloatTensor] = None,
+ energy_labels: Optional[torch.FloatTensor] = None,
+ speaker_ids: Optional[torch.LongTensor] = None,
+ lang_ids: Optional[torch.LongTensor] = None,
+ speaker_embedding: Optional[torch.FloatTensor] = None,
+ return_dict: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ ) -> Union[Tuple, FastSpeech2ConformerModelOutput]:
+ """
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Input sequence of text vectors.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*, defaults to `None`):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
+ `[0, 1]`: 0 for tokens that are **masked**, 1 for tokens that are **not masked**.
+ spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
+ Batch of padded target features.
+ duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
+ Batch of padded durations.
+ pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
+ Batch of padded token-averaged pitch.
+ energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
+ Batch of padded token-averaged energy.
+ speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
+ Speaker ids used to condition features of speech output by the model.
+ lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
+ Language ids used to condition features of speech output by the model.
+ speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
+ Embedding containing conditioning signals for the features of the speech.
+ return_dict (`bool`, *optional*, defaults to `None`):
+ Whether or not to return a [`FastSpeech2ConformerModelOutput`] instead of a plain tuple.
+ output_attentions (`bool`, *optional*, defaults to `None`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*, defaults to `None`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import (
+ ... FastSpeech2ConformerTokenizer,
+ ... FastSpeech2ConformerModel,
+ ... FastSpeech2ConformerHifiGan,
+ ... )
+
+ >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
+ >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
+ >>> input_ids = inputs["input_ids"]
+
+ >>> model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer")
+ >>> output_dict = model(input_ids, return_dict=True)
+ >>> spectrogram = output_dict["spectrogram"]
+
+ >>> vocoder = FastSpeech2ConformerHifiGan.from_pretrained("espnet/fastspeech2_conformer_hifigan")
+ >>> waveform = vocoder(spectrogram)
+ >>> print(waveform.shape)
+ torch.Size([1, 49664])
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_ids.shape, device=input_ids.device)
+
+ has_missing_labels = (
+ spectrogram_labels is None or duration_labels is None or pitch_labels is None or energy_labels is None
+ )
+ if self.training and has_missing_labels:
+ raise ValueError("All labels must be provided to run in training mode.")
+
+ # forward encoder
+ text_masks = attention_mask.unsqueeze(-2)
+
+ encoder_outputs = self.encoder(
+ input_ids,
+ text_masks,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+ hidden_states = encoder_outputs[0]
+
+ # Integrate with language id, speaker id, and speaker embedding
+ if self.multispeaker_model and speaker_ids is not None:
+ speaker_id_embeddings = self.speaker_id_embedding(speaker_ids.view(-1))
+ hidden_states = hidden_states + speaker_id_embeddings.unsqueeze(1)
+
+ if self.multilingual_model and lang_ids is not None:
+ language_id_embeddings = self.language_id_embedding(lang_ids.view(-1))
+ hidden_states = hidden_states + language_id_embeddings.unsqueeze(1)
+
+ if self.speaker_embed_dim is not None and speaker_embedding is not None:
+ embeddings_expanded = (
+ nn.functional.normalize(speaker_embedding).unsqueeze(1).expand(-1, hidden_states.size(1), -1)
+ )
+ hidden_states = self.projection(torch.cat([hidden_states, embeddings_expanded], dim=-1))
+
+ # forward duration predictor and variance predictors
+ duration_mask = ~attention_mask.bool()
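+ # duration_mask is True at padding positions so the duration and variance predictors can ignore padded tokens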
+
+ if self.stop_gradient_from_pitch_predictor:
+ pitch_predictions = self.pitch_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1))
+ else:
+ pitch_predictions = self.pitch_predictor(hidden_states, duration_mask.unsqueeze(-1))
+
+ if self.stop_gradient_from_energy_predictor:
+ energy_predictions = self.energy_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1))
+ else:
+ energy_predictions = self.energy_predictor(hidden_states, duration_mask.unsqueeze(-1))
+
+ duration_predictions = self.duration_predictor(hidden_states)
+ duration_predictions = duration_predictions.masked_fill(duration_mask, 0.0)
+
+ if not self.training:
+ # use prediction in inference
+ embedded_pitch_curve = self.pitch_embed(pitch_predictions)
+ embedded_energy_curve = self.energy_embed(energy_predictions)
+ hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve
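+ # the length regulator repeats each token's hidden state according to the predicted durations (scaled by the
+ # configured speaking_speed) to match the target spectrogram length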
+ hidden_states = length_regulator(hidden_states, duration_predictions, self.config.speaking_speed)
+ else:
+ # use groundtruth in training
+ embedded_pitch_curve = self.pitch_embed(pitch_labels)
+ embedded_energy_curve = self.energy_embed(energy_labels)
+ hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve
+ hidden_states = length_regulator(hidden_states, duration_labels)
+
+ # forward decoder
+ if not self.training:
+ hidden_mask = None
+ else:
+ spectrogram_mask = (spectrogram_labels != -100).any(dim=-1)
+ spectrogram_mask = spectrogram_mask.int()
+ if self.reduction_factor > 1:
+ length_dim = spectrogram_mask.shape[1] - spectrogram_mask.shape[1] % self.reduction_factor
+ spectrogram_mask = spectrogram_mask[:, :length_dim]
+ hidden_mask = spectrogram_mask.unsqueeze(-2)
+
+ decoder_outputs = self.decoder(
+ hidden_states,
+ hidden_mask,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+
+ outputs_before_postnet, outputs_after_postnet = self.speech_decoder_postnet(decoder_outputs[0])
+
+ loss = None
+ if self.training:
+ # calculate loss
+ loss_duration_mask = ~duration_mask
+ loss_spectrogram_mask = spectrogram_mask.unsqueeze(-1).bool()
+ loss = self.criterion(
+ outputs_after_postnet=outputs_after_postnet,
+ outputs_before_postnet=outputs_before_postnet,
+ duration_outputs=duration_predictions,
+ pitch_outputs=pitch_predictions,
+ energy_outputs=energy_predictions,
+ spectrogram_labels=spectrogram_labels,
+ duration_labels=duration_labels,
+ pitch_labels=pitch_labels,
+ energy_labels=energy_labels,
+ duration_mask=loss_duration_mask,
+ spectrogram_mask=loss_spectrogram_mask,
+ )
+
+ if not return_dict:
+ postnet_outputs = (outputs_after_postnet,)
+ audio_feature_predictions = (
+ duration_predictions,
+ pitch_predictions,
+ energy_predictions,
+ )
+ outputs = postnet_outputs + encoder_outputs + decoder_outputs[1:] + audio_feature_predictions
+ return ((loss,) + outputs) if loss is not None else outputs
+
+ return FastSpeech2ConformerModelOutput(
+ loss=loss,
+ spectrogram=outputs_after_postnet,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ duration_outputs=duration_predictions,
+ pitch_outputs=pitch_predictions,
+ energy_outputs=energy_predictions,
+ )
+
+
+# Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock
+class HifiGanResidualBlock(nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
+ super().__init__()
+ self.leaky_relu_slope = leaky_relu_slope
+
+ self.convs1 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ stride=1,
+ dilation=dilation[i],
+ padding=self.get_padding(kernel_size, dilation[i]),
+ )
+ for i in range(len(dilation))
+ ]
+ )
+ self.convs2 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ stride=1,
+ dilation=1,
+ padding=self.get_padding(kernel_size, 1),
+ )
+ for _ in range(len(dilation))
+ ]
+ )
+
+ def get_padding(self, kernel_size, dilation=1):
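+ # "same" padding for a stride-1 dilated convolution, so the residual addition sees matching lengths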
+ return (kernel_size * dilation - dilation) // 2
+
+ def apply_weight_norm(self):
+ for layer in self.convs1:
+ nn.utils.weight_norm(layer)
+ for layer in self.convs2:
+ nn.utils.weight_norm(layer)
+
+ def remove_weight_norm(self):
+ for layer in self.convs1:
+ nn.utils.remove_weight_norm(layer)
+ for layer in self.convs2:
+ nn.utils.remove_weight_norm(layer)
+
+ def forward(self, hidden_states):
+ for conv1, conv2 in zip(self.convs1, self.convs2):
+ residual = hidden_states
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = conv1(hidden_states)
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = conv2(hidden_states)
+ hidden_states = hidden_states + residual
+ return hidden_states
+
+
+@add_start_docstrings(
+ """HiFi-GAN vocoder.""",
+ HIFIGAN_START_DOCSTRING,
+)
+# Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5HifiGan with SpeechT5->FastSpeech2Conformer
+class FastSpeech2ConformerHifiGan(PreTrainedModel):
+ config_class = FastSpeech2ConformerHifiGanConfig
+ main_input_name = "spectrogram"
+
+ def __init__(self, config: FastSpeech2ConformerHifiGanConfig):
+ super().__init__(config)
+ self.num_kernels = len(config.resblock_kernel_sizes)
+ self.num_upsamples = len(config.upsample_rates)
+ self.conv_pre = nn.Conv1d(
+ config.model_in_dim,
+ config.upsample_initial_channel,
+ kernel_size=7,
+ stride=1,
+ padding=3,
+ )
+
+ self.upsampler = nn.ModuleList()
+ for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
+ self.upsampler.append(
+ nn.ConvTranspose1d(
+ config.upsample_initial_channel // (2**i),
+ config.upsample_initial_channel // (2 ** (i + 1)),
+ kernel_size=kernel_size,
+ stride=upsample_rate,
+ padding=(kernel_size - upsample_rate) // 2,
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.upsampler)):
+ channels = config.upsample_initial_channel // (2 ** (i + 1))
+ for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
+ self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
+
+ self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
+
+ self.register_buffer("mean", torch.zeros(config.model_in_dim))
+ self.register_buffer("scale", torch.ones(config.model_in_dim))
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+ def apply_weight_norm(self):
+ nn.utils.weight_norm(self.conv_pre)
+ for layer in self.upsampler:
+ nn.utils.weight_norm(layer)
+ for layer in self.resblocks:
+ layer.apply_weight_norm()
+ nn.utils.weight_norm(self.conv_post)
+
+ def remove_weight_norm(self):
+ nn.utils.remove_weight_norm(self.conv_pre)
+ for layer in self.upsampler:
+ nn.utils.remove_weight_norm(layer)
+ for layer in self.resblocks:
+ layer.remove_weight_norm()
+ nn.utils.remove_weight_norm(self.conv_post)
+
+ def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
+ r"""
+ Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
+ of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
+ waveform.
+
+ Args:
+ spectrogram (`torch.FloatTensor`):
+ Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
+ config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.
+
+ Returns:
+ `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
+ shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
+ """
+ if self.config.normalize_before:
+ spectrogram = (spectrogram - self.mean) / self.scale
+
+ is_batched = spectrogram.dim() == 3
+ if not is_batched:
+ spectrogram = spectrogram.unsqueeze(0)
+
+ hidden_states = spectrogram.transpose(2, 1)
+
+ hidden_states = self.conv_pre(hidden_states)
+ for i in range(self.num_upsamples):
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)
+ hidden_states = self.upsampler[i](hidden_states)
+
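+ # sum the parallel residual blocks of this upsample stage and average them (HiFi-GAN's multi-receptive field fusion)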
+ res_state = self.resblocks[i * self.num_kernels](hidden_states)
+ for j in range(1, self.num_kernels):
+ res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
+ hidden_states = res_state / self.num_kernels
+
+ hidden_states = nn.functional.leaky_relu(hidden_states)
+ hidden_states = self.conv_post(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+
+ if not is_batched:
+ # remove batch dim and collapse tensor to 1-d audio waveform
+ waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1)
+ else:
+ # remove seq-len dim since this collapses to 1
+ waveform = hidden_states.squeeze(1)
+
+ return waveform
+
+
+@add_start_docstrings(
+ "The FastSpeech2ConformerModel with a FastSpeech2ConformerHifiGan vocoder head that performs text-to-speech (waveform).",
+ FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING,
+)
+class FastSpeech2ConformerWithHifiGan(PreTrainedModel):
+ config_class = FastSpeech2ConformerWithHifiGanConfig
+
+ def __init__(self, config: FastSpeech2ConformerWithHifiGanConfig):
+ super().__init__(config)
+
+ self.model = FastSpeech2ConformerModel(config.model_config)
+ self.vocoder = FastSpeech2ConformerHifiGan(config.vocoder_config)
+
+ self.config = config
+
+ @replace_return_docstrings(
+ output_type=FastSpeech2ConformerWithHifiGanOutput, config_class=FastSpeech2ConformerWithHifiGanConfig
+ )
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ spectrogram_labels: Optional[torch.FloatTensor] = None,
+ duration_labels: Optional[torch.LongTensor] = None,
+ pitch_labels: Optional[torch.FloatTensor] = None,
+ energy_labels: Optional[torch.FloatTensor] = None,
+ speaker_ids: Optional[torch.LongTensor] = None,
+ lang_ids: Optional[torch.LongTensor] = None,
+ speaker_embedding: Optional[torch.FloatTensor] = None,
+ return_dict: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ ) -> Union[Tuple, FastSpeech2ConformerModelOutput]:
+ """
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Input sequence of text vectors.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*, defaults to `None`):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
+ `[0, 1]`: 0 for tokens that are **masked**, 1 for tokens that are **not masked**.
+ spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
+ Batch of padded target features.
+ duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
+ Batch of padded durations.
+ pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
+ Batch of padded token-averaged pitch.
+ energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
+ Batch of padded token-averaged energy.
+ speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
+ Speaker ids used to condition features of speech output by the model.
+ lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
+ Language ids used to condition features of speech output by the model.
+ speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
+ Embedding containing conditioning signals for the features of the speech.
+ return_dict (`bool`, *optional*, defaults to `None`):
+ Whether or not to return a [`FastSpeech2ConformerModelOutput`] instead of a plain tuple.
+ output_attentions (`bool`, *optional*, defaults to `None`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*, defaults to `None`):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import (
+ ... FastSpeech2ConformerTokenizer,
+ ... FastSpeech2ConformerWithHifiGan,
+ ... )
+
+ >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
+ >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
+ >>> input_ids = inputs["input_ids"]
+
+ >>> model = FastSpeech2ConformerWithHifiGan.from_pretrained("espnet/fastspeech2_conformer_with_hifigan")
+ >>> output_dict = model(input_ids, return_dict=True)
+ >>> waveform = output_dict["waveform"]
+ >>> print(waveform.shape)
+ torch.Size([1, 49664])
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.model_config.use_return_dict
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.config.model_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.model_config.output_hidden_states
+ )
+
+ model_outputs = self.model(
+ input_ids,
+ attention_mask,
+ spectrogram_labels=spectrogram_labels,
+ duration_labels=duration_labels,
+ pitch_labels=pitch_labels,
+ energy_labels=energy_labels,
+ speaker_ids=speaker_ids,
+ lang_ids=lang_ids,
+ speaker_embedding=speaker_embedding,
+ return_dict=return_dict,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+ if not return_dict:
+ has_missing_labels = (
+ spectrogram_labels is None or duration_labels is None or pitch_labels is None or energy_labels is None
+ )
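+ # when all labels are provided the tuple output starts with the loss, so the spectrogram sits at index 1;
+ # otherwise it is the first element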
+ if has_missing_labels:
+ spectrogram = model_outputs[0]
+ else:
+ spectrogram = model_outputs[1]
+ else:
+ spectrogram = model_outputs["spectrogram"]
+ waveform = self.vocoder(spectrogram)
+
+ if not return_dict:
+ return model_outputs + (waveform,)
+
+ return FastSpeech2ConformerWithHifiGanOutput(waveform=waveform, **model_outputs)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4fd208cef3b409e686276e3e015c05e4f7696df
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py
@@ -0,0 +1,198 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for FastSpeech2Conformer."""
+import json
+import os
+from typing import Optional, Tuple
+
+import regex
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging, requires_backends
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "espnet/fastspeech2_conformer": "https://huggingface.co/espnet/fastspeech2_conformer/raw/main/vocab.json",
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ # Set to somewhat arbitrary large number as the model input
+ # isn't constrained by the relative positional encoding
+ "espnet/fastspeech2_conformer": 4096,
+}
+
+
+class FastSpeech2ConformerTokenizer(PreTrainedTokenizer):
+ """
+ Construct a FastSpeech2Conformer tokenizer.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
+ The beginning-of-sequence token. Note that for FastSpeech2, it is the same as the `eos_token`.
+ eos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
+ The end-of-sequence token. Note that for FastSpeech2, it is the same as the `bos_token`.
+ pad_token (`str`, *optional*, defaults to `"<blank>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ should_strip_spaces (`bool`, *optional*, defaults to `False`):
+ Whether or not to strip the spaces from the list of tokens.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ model_input_names = ["input_ids", "attention_mask"]
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="",
+ eos_token="",
+ pad_token="",
+ unk_token="",
+ should_strip_spaces=False,
+ **kwargs,
+ ):
+ requires_backends(self, "g2p_en")
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+
+ import g2p_en
+
+ self.g2p = g2p_en.G2p()
+
+ self.decoder = {v: k for k, v in self.encoder.items()}
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ should_strip_spaces=should_strip_spaces,
+ **kwargs,
+ )
+
+ self.should_strip_spaces = should_strip_spaces
+
+ @property
+ def vocab_size(self):
+ return len(self.decoder)
+
+ def get_vocab(self):
+ "Returns vocab as a dict"
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ # expand symbols
+ text = regex.sub(";", ",", text)
+ text = regex.sub(":", ",", text)
+ text = regex.sub("-", " ", text)
+ text = regex.sub("&", "and", text)
+
+ # strip unnecessary symbols
+ text = regex.sub(r"[\(\)\[\]\<\>\"]+", "", text)
+
+ # strip whitespaces
+ text = regex.sub(r"\s+", " ", text)
+
+ text = text.upper()
+
+ return text, kwargs
+
+ def _tokenize(self, text):
+ """Returns a tokenized string."""
+ # phonemize
+ tokens = self.g2p(text)
+
+ if self.should_strip_spaces:
+ tokens = list(filter(lambda s: s != " ", tokens))
+
+ tokens.append(self.eos_token)
+
+ return tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ # Override since phonemes cannot be converted back to strings
+ def decode(self, token_ids, **kwargs):
+ logger.warning(
+ "Phonemes cannot be reliably converted back to a string due to the one-to-many mapping; converting to tokens instead."
+ )
+ return self.convert_ids_to_tokens(token_ids)
+
+ # Override since phonemes cannot be converted back to strings
+ def convert_tokens_to_string(self, tokens, **kwargs):
+ logger.warning(
+ "Phonemes cannot be reliably converted back to a string due to the one-to-many mapping; returning the tokens."
+ )
+ return tokens
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ """
+ Save the vocabulary and special tokens file to a directory.
+
+ Args:
+ save_directory (`str`):
+ The directory in which to save the vocabulary.
+
+ Returns:
+ `Tuple(str)`: Paths to the files saved.
+ """
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.get_vocab(), ensure_ascii=False))
+
+ return (vocab_file,)
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
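+ # drop the g2p_en backend before pickling; it is re-created in __setstate__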
+ state["g2p"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ try:
+ import g2p_en
+
+ self.g2p = g2p_en.G2p()
+ except ImportError:
+ raise ImportError(
+ "You need to install g2p-en to use FastSpeech2ConformerTokenizer. "
+ "See https://pypi.org/project/g2p-en/ for installation."
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1add713c44201d67af622e82ace9fd826014e0da
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/configuration_gptsan_japanese.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/configuration_gptsan_japanese.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e726a746b1ad6141772ca62afbf487853d3ef47
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/configuration_gptsan_japanese.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7658e22045778251a4755cb1a629fda1989b140f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..307ab1e9dbe9453ebe574e7c012d54992274b36a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cc8bbddf9e9ca6446b5a9c5f73c2cc4eb27975e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_vision_available
+
+
+_import_structure = {
+ "processing_nougat": ["NougatProcessor"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_nougat_fast"] = ["NougatTokenizerFast"]
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_nougat"] = ["NougatImageProcessor"]
+
+
+if TYPE_CHECKING:
+ from .processing_nougat import NougatProcessor
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_nougat_fast import NougatTokenizerFast
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_nougat import NougatImageProcessor
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..983d9986304dd775c085dbc7e37a3bb79d2cb7f5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/convert_nougat_to_hf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/convert_nougat_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..159c452aa0e46362caf3fcce2a91a848dbe3afc4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/convert_nougat_to_hf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/image_processing_nougat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/image_processing_nougat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f6982e16cbbd70f10488bfd82c7ad4b5ea8fab3
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/image_processing_nougat.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/processing_nougat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/processing_nougat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6fd27c81d8aeed7b51b398a3010b69313ff6bd39
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/processing_nougat.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/tokenization_nougat_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/tokenization_nougat_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93297a76efeb14e9032cc7ad317d14c22126611e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/tokenization_nougat_fast.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/convert_nougat_to_hf.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/convert_nougat_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecc74fdb5fbe8f0e4ad49069d7a739934ccc2330
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/convert_nougat_to_hf.py
@@ -0,0 +1,282 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Nougat checkpoints using the original `nougat` library. URL:
+https://github.com/facebookresearch/nougat/tree/main"""
+
+import argparse
+
+import torch
+from huggingface_hub import hf_hub_download
+from nougat import NougatModel
+from nougat.dataset.rasterize import rasterize_paper
+from nougat.utils.checkpoint import get_checkpoint
+from PIL import Image
+
+from transformers import (
+ DonutSwinConfig,
+ DonutSwinModel,
+ MBartConfig,
+ MBartForCausalLM,
+ NougatImageProcessor,
+ NougatProcessor,
+ NougatTokenizerFast,
+ VisionEncoderDecoderModel,
+)
+
+
+def get_configs(model):
+ original_config = model.config
+
+ encoder_config = DonutSwinConfig(
+ image_size=original_config.input_size,
+ patch_size=4,
+ depths=original_config.encoder_layer,
+ num_heads=[4, 8, 16, 32],
+ window_size=original_config.window_size,
+ embed_dim=128,
+ )
+ decoder_config = MBartConfig(
+ is_decoder=True,
+ is_encoder_decoder=False,
+ add_cross_attention=True,
+ decoder_layers=original_config.decoder_layer,
+ max_position_embeddings=original_config.max_position_embeddings,
+ vocab_size=len(
+ model.decoder.tokenizer
+ ), # several special tokens are added to the vocab of XLMRobertaTokenizer, see repo on the hub (added_tokens.json)
+ scale_embedding=True,
+ add_final_layer_norm=True,
+ tie_word_embeddings=False,
+ )
+
+ return encoder_config, decoder_config
+
+
+# Copied from transformers.models.donut.convert_donut_to_pytorch.rename_key
+def rename_key(name):
+ if "encoder.model" in name:
+ name = name.replace("encoder.model", "encoder")
+ if "decoder.model" in name:
+ name = name.replace("decoder.model", "decoder")
+ if "patch_embed.proj" in name:
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
+ if "patch_embed.norm" in name:
+ name = name.replace("patch_embed.norm", "embeddings.norm")
+ if name.startswith("encoder"):
+ if "layers" in name:
+ name = "encoder." + name
+ if "attn.proj" in name:
+ name = name.replace("attn.proj", "attention.output.dense")
+ if "attn" in name and "mask" not in name:
+ name = name.replace("attn", "attention.self")
+ if "norm1" in name:
+ name = name.replace("norm1", "layernorm_before")
+ if "norm2" in name:
+ name = name.replace("norm2", "layernorm_after")
+ if "mlp.fc1" in name:
+ name = name.replace("mlp.fc1", "intermediate.dense")
+ if "mlp.fc2" in name:
+ name = name.replace("mlp.fc2", "output.dense")
+
+ if name == "encoder.norm.weight":
+ name = "encoder.layernorm.weight"
+ if name == "encoder.norm.bias":
+ name = "encoder.layernorm.bias"
+
+ return name
+
+
+# Copied from transformers.models.donut.convert_donut_to_pytorch.convert_state_dict
+def convert_state_dict(orig_state_dict, model):
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if "qkv" in key:
+ key_split = key.split(".")
+ layer_num = int(key_split[3])
+ block_num = int(key_split[5])
+ dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
+
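+ # the original checkpoint stores a fused qkv projection; split it into separate query/key/value tensors of size dim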
+ if "weight" in key:
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
+ ] = val[:dim, :]
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
+ ] = val[dim : dim * 2, :]
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
+ ] = val[-dim:, :]
+ else:
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
+ ] = val[:dim]
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
+ ] = val[dim : dim * 2]
+ orig_state_dict[
+ f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
+ ] = val[-dim:]
+ elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
+ # HuggingFace implementation doesn't use attn_mask buffer
+ # and model doesn't use final LayerNorms for the encoder
+ pass
+ else:
+ orig_state_dict[rename_key(key)] = val
+
+ return orig_state_dict
+
+
+def convert_nougat_checkpoint(model_tag, pytorch_dump_folder_path=None, push_to_hub=False):
+ # load original model
+ checkpoint_path = get_checkpoint(None, model_tag)
+ original_model = NougatModel.from_pretrained(checkpoint_path)
+ original_model.eval()
+
+ # load HuggingFace model
+ encoder_config, decoder_config = get_configs(original_model)
+ encoder = DonutSwinModel(encoder_config)
+ decoder = MBartForCausalLM(decoder_config)
+ model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
+ model.eval()
+
+ state_dict = original_model.state_dict()
+ new_state_dict = convert_state_dict(state_dict, model)
+ model.load_state_dict(new_state_dict)
+
+ # verify results on PDF
+ filepath = hf_hub_download(repo_id="ysharma/nougat", filename="input/nougat.pdf", repo_type="space")
+ images = rasterize_paper(pdf=filepath, return_pil=True)
+ image = Image.open(images[0])
+
+ tokenizer_file = checkpoint_path / "tokenizer.json"
+ tokenizer = NougatTokenizerFast(tokenizer_file=str(tokenizer_file))
+ tokenizer.pad_token = ""
+ tokenizer.bos_token = ""
+ tokenizer.eos_token = ""
+ tokenizer.unk_token = ""
+ tokenizer.model_max_length = original_model.config.max_length
+
+ size = {"height": original_model.config.input_size[0], "width": original_model.config.input_size[1]}
+ image_processor = NougatImageProcessor(
+ do_align_long_axis=original_model.config.align_long_axis,
+ size=size,
+ )
+ processor = NougatProcessor(image_processor=image_processor, tokenizer=tokenizer)
+
+ # verify pixel_values
+ pixel_values = processor(image, return_tensors="pt").pixel_values
+ original_pixel_values = original_model.encoder.prepare_input(image).unsqueeze(0)
+
+ assert torch.allclose(original_pixel_values, pixel_values)
+
+ # verify patch embeddings
+ original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
+ patch_embeddings, _ = model.encoder.embeddings(pixel_values)
+ assert torch.allclose(original_patch_embed, patch_embeddings)
+
+ # verify encoder hidden states
+ original_last_hidden_state = original_model.encoder(pixel_values)
+ last_hidden_state = model.encoder(pixel_values).last_hidden_state
+ assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
+
+ # NOTE original model does not use tied weights for embeddings of decoder
+ original_embeddings = original_model.decoder.model.model.decoder.embed_tokens
+ embeddings = model.decoder.model.decoder.embed_tokens
+ assert torch.allclose(original_embeddings.weight, embeddings.weight, atol=1e-3)
+
+ # verify decoder logits
+ prompt = "hello world"
+ decoder_input_ids = original_model.decoder.tokenizer(
+ prompt, add_special_tokens=False, return_tensors="pt"
+ ).input_ids
+ decoder_attention_mask = torch.ones_like(decoder_input_ids)
+ original_logits = original_model(
+ image_tensors=pixel_values, decoder_input_ids=decoder_input_ids, attention_mask=decoder_attention_mask
+ ).logits
+ logits = model(
+ pixel_values,
+ decoder_input_ids=decoder_input_ids[:, :-1],
+ decoder_attention_mask=decoder_attention_mask[:, :-1],
+ ).logits
+ assert torch.allclose(original_logits, logits, atol=1e-3)
+
+ # verify generation
+ outputs = model.generate(
+ pixel_values,
+ min_length=1,
+ max_length=30,
+ pad_token_id=tokenizer.pad_token_id,
+ eos_token_id=tokenizer.eos_token_id,
+ use_cache=True,
+ bad_words_ids=[
+ [tokenizer.unk_token_id],
+ ],
+ return_dict_in_generate=True,
+ do_sample=False,
+ )
+ generated = tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0]
+
+ if model_tag == "0.1.0-base":
+ expected_generation = "# Nougat: Neural Optical Understanding for Academic Documents\n\nLukas Blecher\n\nCorrespondence to: lblec"
+ elif model_tag == "0.1.0-small":
+ expected_generation = (
+ "# Nougat: Neural Optical Understanding for Academic Documents\n\nLukas Blecher\n\nCorrespondence to: lble"
+ )
+ else:
+ raise ValueError(f"Unexpected model tag: {model_tag}")
+
+ assert generated == expected_generation
+ print("Looks ok!")
+
+ if pytorch_dump_folder_path is not None:
+ print(f"Saving model and processor to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ tag_to_name = {"0.1.0-base": "nougat-base", "0.1.0-small": "nougat-small"}
+ model_name = tag_to_name[model_tag]
+
+ model.push_to_hub(f"facebook/{model_name}")
+ processor.push_to_hub(f"facebook/{model_name}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--model_tag",
+ default="0.1.0-base",
+ required=False,
+ type=str,
+ choices=["0.1.0-base", "0.1.0-small"],
+ help="Tag of the original model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ required=False,
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument(
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model and processor to the 🤗 hub.",
+ )
+
+ args = parser.parse_args()
+ convert_nougat_checkpoint(args.model_tag, args.pytorch_dump_folder_path, args.push_to_hub)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/image_processing_nougat.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/image_processing_nougat.py
new file mode 100644
index 0000000000000000000000000000000000000000..49913d5baa080ba3797b4383983d22f15691004c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/image_processing_nougat.py
@@ -0,0 +1,532 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Nougat."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ get_resize_output_image_size,
+ pad,
+ resize,
+ to_channel_dimension_format,
+ to_pil_image,
+)
+from ...image_utils import (
+ IMAGENET_DEFAULT_MEAN,
+ IMAGENET_DEFAULT_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, logging
+from ...utils.import_utils import is_cv2_available, is_vision_available
+
+
+logger = logging.get_logger(__name__)
+
+
+if is_cv2_available():
+ pass
+
+
+if is_vision_available():
+ import PIL
+
+
+class NougatImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a Nougat image processor.
+
+ Args:
+ do_crop_margin (`bool`, *optional*, defaults to `True`):
+ Whether to crop the image margins.
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+ `do_resize` in the `preprocess` method.
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 896, "width": 672}`):
+ Size of the image after resizing. Can be overridden by `size` in the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
+ do_thumbnail (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image using thumbnail method.
+ do_align_long_axis (`bool`, *optional*, defaults to `False`):
+ Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Whether to pad the images to the largest image size in the batch.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
+ Image standard deviation.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_crop_margin: bool = True,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_thumbnail: bool = True,
+ do_align_long_axis: bool = False,
+ do_pad: bool = True,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+
+ size = size if size is not None else {"height": 896, "width": 672}
+ size = get_size_dict(size)
+
+ self.do_crop_margin = do_crop_margin
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_thumbnail = do_thumbnail
+ self.do_align_long_axis = do_align_long_axis
+ self.do_pad = do_pad
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
+ self._valid_processor_keys = [
+ "images",
+ "do_crop_margin",
+ "do_resize",
+ "size",
+ "resample",
+ "do_thumbnail",
+ "do_align_long_axis",
+ "do_pad",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def python_find_non_zero(self, image: np.array):
+ """This is a reimplementation of a findNonZero function equivalent to cv2."""
+ non_zero_indices = np.column_stack(np.nonzero(image))
+ idxvec = non_zero_indices[:, [1, 0]]
+ idxvec = idxvec.reshape(-1, 1, 2)
+ return idxvec
+
+ def python_bounding_rect(self, coordinates):
+ """This is a reimplementation of a BoundingRect function equivalent to cv2."""
+ min_values = np.min(coordinates, axis=(0, 1)).astype(int)
+ max_values = np.max(coordinates, axis=(0, 1)).astype(int)
+ x_min, y_min = min_values[0], min_values[1]
+ width = max_values[0] - x_min + 1
+ height = max_values[1] - y_min + 1
+ return x_min, y_min, width, height
+
+ def crop_margin(
+ self,
+ image: np.array,
+ gray_threshold: int = 200,
+ data_format: Optional[ChannelDimension] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.array:
+ """
+ Crops the margin of the image. After min-max normalization of the grayscale values, pixels at or above
+ `gray_threshold` are treated as background (margin) and the image is cropped to the bounding box of the darker
+ pixels.
+
+ Args:
+ image (`np.array`):
+ The image to be cropped.
+ gray_threshold (`int`, *optional*, defaults to `200`):
+ Value below which pixels are considered to be gray.
+ data_format (`ChannelDimension`, *optional*):
+ The channel dimension format of the output image. If unset, will use the inferred format from the
+ input.
+ input_data_format (`ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If unset, will use the inferred format from the input.
+ """
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ image = to_pil_image(image, input_data_format=input_data_format)
+ data = np.array(image.convert("L")).astype(np.uint8)
+ max_val = data.max()
+ min_val = data.min()
+ if max_val == min_val:
+ image = np.array(image)
+ image = (
+ to_channel_dimension_format(image, data_format, input_data_format)
+ if data_format is not None
+ else image
+ )
+ return image
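+ # min-max rescale the grayscale values to [0, 255]; pixels darker than gray_threshold are treated as content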
+ data = (data - min_val) / (max_val - min_val) * 255
+ gray = data < gray_threshold
+ coords = self.python_find_non_zero(gray)
+ x_min, y_min, width, height = self.python_bounding_rect(coords)
+ image = image.crop((x_min, y_min, x_min + width, y_min + height))
+ image = np.array(image).astype(np.uint8)
+ image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST)
+
+ image = (
+ to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
+ )
+
+ return image
+
+ # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.align_long_axis
+ def align_long_axis(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Align the long axis of the image to the longest axis of the specified size.
+
+ Args:
+ image (`np.ndarray`):
+ The image to be aligned.
+ size (`Dict[str, int]`):
+ The size `{"height": h, "width": w}` to align the long axis to.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The data format of the output image. If unset, the same format as the input image is used.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+
+ Returns:
+ `np.ndarray`: The aligned image.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ output_height, output_width = size["height"], size["width"]
+
+ if (output_width < output_height and input_width > input_height) or (
+ output_width > output_height and input_width < input_height
+ ):
+ image = np.rot90(image, 3)
+
+ if data_format is not None:
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+
+ return image
+
+ def pad_image(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """
+ Pad the image to the specified size at the top, bottom, left and right.
+
+ Args:
+ image (`np.ndarray`):
+ The image to be padded.
+ size (`Dict[str, int]`):
+ The size `{"height": h, "width": w}` to pad the image to.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The data format of the output image. If unset, the same format as the input image is used.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ output_height, output_width = size["height"], size["width"]
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+
+ delta_width = output_width - input_width
+ delta_height = output_height - input_height
+
+ pad_top = delta_height // 2
+ pad_left = delta_width // 2
+
+ pad_bottom = delta_height - pad_top
+ pad_right = delta_width - pad_left
+
+ padding = ((pad_top, pad_bottom), (pad_left, pad_right))
+ return pad(image, padding, data_format=data_format, input_data_format=input_data_format)
+
+ # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.thumbnail
+ def thumbnail(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
+ corresponding dimension of the specified size.
+
+ Args:
+ image (`np.ndarray`):
+ The image to be resized.
+ size (`Dict[str, int]`):
+ The size `{"height": h, "width": w}` to resize the image to.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ The resampling filter to use.
+ data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
+ The data format of the output image. If unset, the same format as the input image is used.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
+ output_height, output_width = size["height"], size["width"]
+
+ # We always resize to the smallest of either the input or output size.
+ height = min(input_height, output_height)
+ width = min(input_width, output_width)
+
+ if height == input_height and width == input_width:
+ return image
+
+ if input_height > input_width:
+ width = int(input_width * height / input_height)
+ elif input_width > input_height:
+ height = int(input_height * width / input_width)
+
+ return resize(
+ image,
+ size=(height, width),
+ resample=resample,
+ reducing_gap=2.0,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.resize
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resizes `image` to `(height, width)` specified by `size` using the PIL library.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+                Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ size = get_size_dict(size)
+ shortest_edge = min(size["height"], size["width"])
+ output_size = get_resize_output_image_size(
+ image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format
+ )
+ resized_image = resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+ return resized_image
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_crop_margin: bool = None,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_thumbnail: bool = None,
+ do_align_long_axis: bool = None,
+ do_pad: bool = None,
+ do_rescale: bool = None,
+ rescale_factor: Union[int, float] = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> PIL.Image.Image:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255.
+ do_crop_margin (`bool`, *optional*, defaults to `self.do_crop_margin`):
+ Whether to crop the image margins.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing. Shortest edge of the image is resized to min(size["height"],
+ size["width"]) with the longest edge resized to keep the input aspect ratio.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
+ Whether to resize the image using thumbnail method.
+ do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
+ Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
+ Whether to pad the images to the largest image size in the batch.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image by the specified scale `rescale_factor`.
+ rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
+ Scale factor to use if rescaling the image.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use for normalization.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use for normalization.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: defaults to the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_crop_margin = do_crop_margin if do_crop_margin is not None else self.do_crop_margin
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ resample = resample if resample is not None else self.resample
+ do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail
+ do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis
+ do_pad = do_pad if do_pad is not None else self.do_pad
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_pad=do_pad,
+ size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg.
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_crop_margin:
+ images = [self.crop_margin(image, input_data_format=input_data_format) for image in images]
+
+ if do_align_long_axis:
+ images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images]
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_thumbnail:
+ images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images]
+
+ if do_pad:
+ images = [self.pad_image(image=image, size=size, input_data_format=input_data_format) for image in images]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
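For orientation, a minimal usage sketch of the full `preprocess` pipeline defined above, assuming the public `facebook/nougat-base` checkpoint and a hypothetical local `page.png`; the printed shape reflects that checkpoint's default `size`.

```python
# Sketch: running a document page through crop_margin -> align_long_axis ->
# resize -> thumbnail -> pad -> rescale -> normalize. "page.png" is hypothetical.
from PIL import Image
from transformers import NougatImageProcessor

image_processor = NougatImageProcessor.from_pretrained("facebook/nougat-base")
image = Image.open("page.png").convert("RGB")

batch = image_processor.preprocess(image, return_tensors="np")
# With the checkpoint's default size {"height": 896, "width": 672} this is
# (1, 3, 896, 672): a batch of channels-first pixel values.
print(batch["pixel_values"].shape)
```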
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/processing_nougat.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/processing_nougat.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f94c6718ba6600ebebef4c0e1fdb9865c609e5e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/processing_nougat.py
@@ -0,0 +1,160 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for Nougat.
+"""
+
+from typing import Dict, List, Optional, Union
+
+from transformers.tokenization_utils_base import PreTokenizedInput, TextInput, TruncationStrategy
+
+from ...processing_utils import ProcessorMixin
+from ...utils import PaddingStrategy, TensorType
+
+
+class NougatProcessor(ProcessorMixin):
+ r"""
+ Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor.
+
+ [`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the
+ [`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`NougatImageProcessor`]):
+ An instance of [`NougatImageProcessor`]. The image processor is a required input.
+ tokenizer ([`NougatTokenizerFast`]):
+ An instance of [`NougatTokenizerFast`]. The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "AutoImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, image_processor, tokenizer):
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+
+ def __call__(
+ self,
+ images=None,
+ text=None,
+ do_crop_margin: bool = None,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: "PILImageResampling" = None, # noqa: F821
+ do_thumbnail: bool = None,
+ do_align_long_axis: bool = None,
+ do_pad: bool = None,
+ do_rescale: bool = None,
+ rescale_factor: Union[int, float] = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821
+ input_data_format: Optional[Union[str, "ChannelDimension"]] = None, # noqa: F821
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
+ text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ text_pair_target: Optional[
+ Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
+ ] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ is_split_into_words: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ ):
+ if images is None and text is None:
+ raise ValueError("You need to specify either an `images` or `text` input to process.")
+
+ if images is not None:
+ inputs = self.image_processor(
+ images,
+ do_crop_margin=do_crop_margin,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_thumbnail=do_thumbnail,
+ do_align_long_axis=do_align_long_axis,
+ do_pad=do_pad,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ return_tensors=return_tensors,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ if text is not None:
+ encodings = self.tokenizer(
+ text,
+ text_pair=text_pair,
+ text_target=text_target,
+ text_pair_target=text_pair_target,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ is_split_into_words=is_split_into_words,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ if text is None:
+ return inputs
+ elif images is None:
+ return encodings
+ else:
+ inputs["labels"] = encodings["input_ids"]
+ return inputs
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ def post_process_generation(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.post_process_generation`].
+ Please refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.post_process_generation(*args, **kwargs)
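A short sketch of how the processor above is typically used, assuming the public `facebook/nougat-base` checkpoint and a hypothetical local image file:

```python
# Sketch of NougatProcessor combining the image processor and tokenizer.
# "page.png" is a hypothetical local file.
from PIL import Image
from transformers import NougatProcessor

processor = NougatProcessor.from_pretrained("facebook/nougat-base")
image = Image.open("page.png").convert("RGB")

# Images only: behaves like NougatImageProcessor and returns pixel_values.
inputs = processor(images=image, return_tensors="pt")

# Images and text: the tokenized text is attached as "labels", the layout
# expected when training a VisionEncoderDecoderModel on (image, markup) pairs.
batch = processor(images=image, text="# Introduction", return_tensors="pt")
print(sorted(batch.keys()))  # ['labels', 'pixel_values']
```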
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/tokenization_nougat_fast.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/tokenization_nougat_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..d02aec75752123ae385bdd736ecdfd420af601aa
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/nougat/tokenization_nougat_fast.py
@@ -0,0 +1,634 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Fast tokenizer class for Nougat.
+"""
+import re
+from functools import partial
+from multiprocessing import Pool
+from typing import List, Union
+
+import numpy as np
+
+from transformers.tokenization_utils_base import INIT_TOKENIZER_DOCSTRING
+from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
+from transformers.utils import add_end_docstrings
+
+from ...utils import is_levenshtein_available, is_nltk_available, logging, requires_backends
+
+
+if is_levenshtein_available():
+ from Levenshtein import ratio
+
+if is_nltk_available():
+ import nltk
+
+
+logger = logging.get_logger(__name__)
+
+
+INIT_TOKENIZER_DOCSTRING += """
+ tokenizer_object ([`tokenizers.Tokenizer`]):
+ A [`tokenizers.Tokenizer`] object from 🤗 tokenizers to instantiate from. See [Using tokenizers from 🤗
+ tokenizers](../fast_tokenizers) for more information.
+ tokenizer_file ([`str`]):
+ A path to a local JSON file representing a previously serialized [`tokenizers.Tokenizer`] object from 🤗
+ tokenizers.
+"""
+
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "tokenizer_file": {
+ "facebook/nougat-base": "https://huggingface.co/facebook/nougat-base/tokenizer/blob/main/tokenizer.json",
+ },
+}
+
+VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/nougat-base": 3584}
+
+
+def markdown_compatible(text: str) -> str:
+ """
+ Make text compatible with Markdown formatting.
+
+ This function makes various text formatting adjustments to make it compatible with Markdown.
+
+ Args:
+ text (`str`):
+ The input text to be made Markdown-compatible.
+
+ Returns:
+ `str`: The Markdown-compatible text.
+ """
+ # equation tag
+ # Replace lines that start with a pattern like (decimal) \[some text\] with \[[some text] \tag{decimal}\].
+ text = re.sub(r"^\(([\d.]+[a-zA-Z]?)\) \\\[(.+?)\\\]$", r"\[\2 \\tag{\1}\]", text, flags=re.M)
+ # Replace lines that start with a pattern like \[some text\] (decimal) with \[[some text] \tag{decimal}\].
+ text = re.sub(r"^\\\[(.+?)\\\] \(([\d.]+[a-zA-Z]?)\)$", r"\[\1 \\tag{\2}\]", text, flags=re.M)
+ # Replace lines that start with a pattern like \[some text\] (digits) \[another text\] with \[[some text] \tag{digits}\] [another text].
+ text = re.sub(
+ r"^\\\[(.+?)\\\] \(([\d.]+[a-zA-Z]?)\) (\\\[.+?\\\])$",
+ r"\[\1 \\tag{\2}\] \3",
+ text,
+ flags=re.M,
+ )
+ # multi line
+ text = text.replace(r"\. ", ". ")
+ # bold formatting
+ text = text.replace(r"\bm{", r"\mathbf{").replace(r"{\\bm ", r"\mathbf{")
+ text = re.sub(r"\\mbox{ ?\\boldmath\$(.*?)\$}", r"\\mathbf{\1}", text)
+ # Reformat urls (http, ftp and https only) to markdown [url](url) clickable format
+ text = re.sub(
+ r"((?:http|ftp|https):\/\/(?:[\w_-]+(?:(?:\.[\w_-]+)+))(?:[\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-]))",
+ r"[\1](\1)",
+ text,
+ )
+ # algorithms
+ text = re.sub(r"```\s*(.+?)\s*```", r"```\n\1\n```", text, flags=re.S)
+
+ return text
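As a quick illustration of two of the rewrites above (the equation-tag rule and the URL rule), a tiny self-contained check on an invented string:

```python
# Sketch exercising markdown_compatible on an invented string.
from transformers.models.nougat.tokenization_nougat_fast import markdown_compatible

sample = "(2.1) \\[a^{2}+b^{2}=c^{2}\\]\nMore at https://github.com/facebookresearch/nougat"
print(markdown_compatible(sample))
# The equation line becomes "\[a^{2}+b^{2}=c^{2} \tag{2.1}\]" and the URL is
# rewritten as a clickable markdown link "[https://...](https://...)".
```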
+
+
+def normalize_list_like_lines(generation):
+ """
+ Normalize lines in the given text that resemble list items. The function looks for lines that start optionally with
+ '-' or '*', possibly followed by Roman numerals or digits indicating nesting levels. The function reformats such
+ lines to make them more structured.
+
+ Args:
+ generation (str): The input text containing lines that need to be normalized.
+
+ Returns:
+ str: The input text with the list-like lines normalized.
+
+ Note:
+ The function uses regular expressions to identify and reformat the list-like lines. The patterns capture
+ optional bullet points, nesting levels indicated by numerals, and the actual list item content. The
+ normalization adjusts the bullet point style and nesting levels based on the captured patterns.
+ """
+
+ # This matches lines starting with - or *, not followed by - or * (lists)
+ # that are then numbered by digits \d or roman numerals (one or more)
+ # and then, optional additional numbering of this line is captured
+ # this is then fed to re.finditer.
+ pattern = r"(?:^)(-|\*)?(?!-|\*) ?((?:\d|[ixv])+ )?.+? (-|\*) (((?:\d|[ixv])+)\.(\d|[ixv]) )?.*(?:$)"
+
+ for match in reversed(list(re.finditer(pattern, generation, flags=re.I | re.M))):
+ start, stop = match.span()
+ delim = match.group(3) + " "
+ splits = match.group(0).split(delim)
+ replacement = ""
+
+ if match.group(1) is not None:
+ splits = splits[1:]
+ delim1 = match.group(1) + " "
+ else:
+ delim1 = ""
+ continue # Skip false positives
+
+ pre, post = generation[:start], generation[stop:]
+
+ for i, item in enumerate(splits):
+ level = 0
+ potential_numeral, _, rest = item.strip().partition(" ")
+ if not rest:
+ continue
+ # Infer current nesting level based on detected numbering
+ if re.match(r"^[\dixv]+((?:\.[\dixv])?)+$", potential_numeral, flags=re.I | re.M):
+ level = potential_numeral.count(".")
+
+ replacement += (
+ ("\n" if i > 0 else "") + ("\t" * level) + (delim if i > 0 or start == 0 else delim1) + item.strip()
+ )
+
+ if post == "":
+ post = "\n"
+
+ generation = pre + replacement + post
+
+ return generation
+
+
+def find_next_punctuation(text: str, start_idx=0):
+ """
+ Find the index of the next punctuation mark.
+
+ Args:
+ text (`str`):
+ String to examine
+        start_idx (`int`, *optional*):
+            Index at which to start the search.
+ """
+
+ for i in range(start_idx, len(text)):
+ if text[i] in [".", "?", "!", "\n"]:
+ return i
+
+ return None
+
+
+def truncate_repetitions(text: str, min_len: int = 30) -> str:
+ """
+ Attempt to truncate repeating segments in the input string.
+
+ This function looks for the longest repeating substring at the end of the input string and truncates it to appear
+ only once. To be considered for removal, repetitions need to be continuous.
+
+ Args:
+ text (`str`):
+ The input raw prediction to be truncated.
+ min_len (int):
+ The minimum length of the repeating segment.
+
+ Returns:
+ `str`: The input string with repeated segments truncated.
+ """
+ text_lower = text.lower()
+ text_length = len(text_lower)
+
+ if text_length < 2 * min_len:
+ return text
+
+ # try to find a length at which the tail is repeating
+ max_repetition_length = None
+ for repetition_length in range(min_len, int(text_length / 2)):
+ # check if there is a repetition at the end
+ same = True
+ for i in range(0, repetition_length):
+ if text_lower[text_length - repetition_length - i - 1] != text_lower[text_length - i - 1]:
+ same = False
+ break
+
+ if same:
+ max_repetition_length = repetition_length
+
+ if max_repetition_length is None:
+ return text
+
+ lcs = text_lower[-max_repetition_length:]
+
+ # remove all but the last repetition
+ substituted_text = text
+ substituted_text_lower = text_lower
+ while substituted_text_lower.endswith(lcs):
+ substituted_text = substituted_text[:-max_repetition_length]
+ substituted_text_lower = substituted_text_lower[:-max_repetition_length]
+
+ # this is the tail with the repetitions
+ repeating_tail = text_lower[len(substituted_text_lower) :]
+
+ # add until next punctuation and make sure last sentence is not repeating
+ substituted_text_lower_out = substituted_text_lower
+ while True:
+ sentence_end = find_next_punctuation(text_lower, len(substituted_text_lower_out))
+ sentence_start = find_next_punctuation(text_lower[::-1], len(substituted_text_lower_out))
+ if sentence_end and sentence_start:
+ sentence = text_lower[sentence_start:sentence_end]
+ substituted_text_lower_out = text_lower[: sentence_end + 1]
+ if sentence in repeating_tail:
+ break
+ else:
+ break
+
+ text_out = text[: len(substituted_text_lower_out)]
+
+ return text_out
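A small sketch of the truncation above on an invented string with a continuously repeated sentence; only a single copy of the repeated tail survives:

```python
# Sketch: a sentence repeated six times is collapsed to a single occurrence.
from transformers.models.nougat.tokenization_nougat_fast import truncate_repetitions

repeated = "Intro text. " + "The results are shown in Table 1. " * 6
print(truncate_repetitions(repeated))
# keeps one copy: "Intro text. The results are shown in Table 1."
```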
+
+
+def remove_numbers(lines):
+ def _clean(s):
+ return re.sub(r"(?:[\d_]|\*\*)", "", s).strip()
+
+ if isinstance(lines, str):
+ return _clean(lines)
+ out = []
+ for l in lines:
+ out.append(_clean(l))
+ return out
+
+
+def get_slices(lines, clean_lines):
+ """
+ Get slices of text based on specific criteria within the lines.
+
+ This function identifies and returns slices of text from the input lines based on certain conditions.
+
+ These conditions were chosen by the Nougat authors:
+ - The slice is less than 200 characters long.
+ - The slice is more than 3 characters long.
+ - The slice does not start with "[MISSING_PAGE".
+    - The slice is either the same as the next slice or the Levenshtein similarity ratio of the two is greater
+      than 0.9.
+
+ Args:
+ lines (`List[str]`):
+ The list of lines containing the text.
+ clean_lines (`List[str]`):
+ A cleaned version of the text (without numbers).
+
+ Returns:
+ `List[tuple]`: A list of tuples representing the start and end indices of text slices.
+ """
+ indices = np.zeros(len(lines))
+ for i in range(len(lines) - 1):
+ j = i + 1
+ while not clean_lines[j] and j < len(lines) - 1:
+ j += 1
+ if (
+ len(clean_lines[i]) < 200
+ and len(clean_lines[i]) > 3
+ and len(clean_lines[j]) < 200
+ and len(clean_lines[j]) > 3
+ and not clean_lines[i].startswith("[MISSING_PAGE")
+ and (clean_lines[i] == clean_lines[j] or ratio(clean_lines[i], clean_lines[j]) > 0.9)
+ ):
+ indices[i:j] = 1
+ ids = np.where(indices)[0]
+ slices = []
+ if len(ids) == 0:
+ return slices
+ j0 = 0
+ for j, x in enumerate(np.diff(ids) > 3):
+ if x:
+ slices.append((ids[j0], ids[j] + 2))
+ j0 = j + 1
+ slices.append((ids[j0], ids[-1] + 2))
+ return [sli for sli in slices if sli[1] - sli[0] > 15]
+
+
+def remove_slice_from_lines(lines, clean_text, slice) -> str:
+ """
+ Remove a slice of text from the lines based on specific criteria.
+
+ This function identifies a slice of text within the lines and removes it based on certain conditions.
+
+ Args:
+ lines (list of str): The list of lines containing the text.
+ clean_text (list of str): A cleaned version of the text (without numbers).
+ slice (tuple): A tuple representing the start and end indices of the slice to be removed.
+
+ Returns:
+ str: The removed slice of text as a single string.
+ """
+ base = clean_text[slice[0]]
+ section = list(slice)
+ check_start_flag = False
+ # backwards pass, at most 5 lines
+ for line_idx in range(max(0, slice[0] - 1), max(0, slice[0] - 5), -1):
+ if not lines[line_idx]:
+ continue
+ if lines[line_idx] == "## References":
+ section[0] = line_idx
+ break
+ elif ratio(base, remove_numbers(lines[line_idx])) < 0.9:
+ section[0] = line_idx + 1
+ potential_ref = remove_numbers(lines[max(0, line_idx - 1)].partition("* [")[-1])
+ if len(potential_ref) >= 0.75 * len(base) and ratio(base, potential_ref) < 0.9:
+ section[0] = line_idx
+ check_start_flag = True
+ break
+ # forward pass, at most 5 lines
+ for line_idx in range(min(len(lines), slice[1]), min(len(lines), slice[1] + 5)):
+ if ratio(base, remove_numbers(lines[line_idx])) < 0.9:
+ section[1] = line_idx
+ break
+ if len(lines) <= section[1]:
+ section[1] = len(lines) - 1
+ to_delete = "\n".join(lines[section[0] : section[1] + 1])
+ # cut off next page content
+ itera, iterb = enumerate(lines[section[1] - 1]), enumerate(lines[section[1]])
+ while True:
+ try:
+ (ia, a) = next(itera)
+ while a.isnumeric():
+ (ia, a) = next(itera)
+ (ib, b) = next(iterb)
+ while b.isnumeric():
+ (ib, b) = next(iterb)
+ if a != b:
+ break
+ except StopIteration:
+ break
+ if check_start_flag and "* [" in to_delete:
+ to_delete = "* [" + to_delete.partition("* [")[-1]
+ try:
+ delta = len(lines[section[1]]) - ib - 1
+ if delta > 0:
+ to_delete = to_delete[:-delta]
+ except UnboundLocalError:
+ pass
+
+ return to_delete.strip()
+
+
+@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
+class NougatTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Fast tokenizer for Nougat (backed by HuggingFace tokenizers library).
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods. This class mainly adds Nougat-specific
+ methods for postprocessing the generated text.
+
+ Args:
+ vocab_file (`str`, *optional*):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ tokenizer_file (`str`, *optional*):
+ [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
+ contains everything needed to load the tokenizer.
+
+        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+            Whether to clean up spaces after decoding; cleaning up consists of removing potential artifacts like
+            extra spaces.
+
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
+            token.
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = None
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ clean_up_tokenization_spaces=False,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token="<pad>",
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file=vocab_file,
+ tokenizer_file=tokenizer_file,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ pad_token=pad_token,
+ **kwargs,
+ )
+ self.vocab_file = vocab_file
+
+ def remove_hallucinated_references(self, text: str) -> str:
+ """
+ Remove hallucinated or missing references from the text.
+
+ This function identifies and removes references that are marked as missing or hallucinated from the input text.
+
+ Args:
+ text (`str`):
+ The input text containing references.
+
+ Returns:
+ `str`: The text with hallucinated references removed.
+ """
+ lines = text.split("\n")
+ if len(lines) == 0:
+ return ""
+ clean_lines = remove_numbers(lines)
+ slices = get_slices(lines, clean_lines)
+ to_delete = []
+ for slice in slices:
+ to_delete.append(remove_slice_from_lines(lines, clean_lines, slice))
+ for to_delete in reversed(to_delete):
+ text = text.replace(to_delete, "\n\n[MISSING_PAGE_POST]\n\n")
+ text = re.sub(
+ r"## References\n+\[MISSING_PAGE_POST(:\d+)?\]",
+ "\n\n[MISSING_PAGE_POST\\1]",
+ text,
+ )
+ return text
+
+ def correct_tables(self, generation: str) -> str:
+ """
+ Takes a generated string and fixes tables/tabulars to make them match the markdown format needed.
+
+ Args:
+ generation (str): The generated text to be postprocessed.
+
+ Returns:
+ str: The postprocessed text.
+
+ Example:
+
+ ```python
+ correct_tables("\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}")
+ "\\begin{table}\n\\begin{tabular}{l l} & \\ \\end{tabular}\n\\end{table}"
+ ```
+ """
+ # remove obvious wrong tables
+ for l in generation.split("\n"):
+ if l.count("\\begin{tabular}") > 15 or l.count("\\multicolumn") > 60 or l.count("&") > 400:
+ generation = generation.replace(l, "")
+ # whitespace corrections
+
+ generation = generation.replace("\\begin{table} \\begin{tabular}", "\\begin{table}\n\\begin{tabular}")
+ generation = generation.replace("\\end{tabular} \\end{table}", "\\end{tabular}\n\\end{table}")
+ generation = generation.replace("\\end{table} Tab", "\\end{table}\nTab")
+
+ generation = re.sub(r"(^.+)\\begin{tab", r"\1\n\\begin{tab", generation, flags=re.M)
+
+ # Remove left-aligned empty LaTeX tabular blocks.
+ generation = generation.replace(r"\begin{tabular}{l l} & \\ \end{tabular}", "")
+ # Remove tabulars with just 2 newline characters.
+ generation = generation.replace("\\begin{tabular}{}\n\n\\end{tabular}", "")
+ return generation
+
+ def post_process_single(self, generation: str, fix_markdown: bool = True) -> str:
+ """
+ Postprocess a single generated text. Regular expressions used here are taken directly from the Nougat article
+ authors. These expressions are commented for clarity and tested end-to-end in most cases.
+
+ Args:
+ generation (str): The generated text to be postprocessed.
+ fix_markdown (bool, optional): Whether to perform Markdown formatting fixes. Default is True.
+
+ Returns:
+ str: The postprocessed text.
+ """
+ generation = re.sub(
+ r"(?:\n|^)#+ \d*\W? ?(.{100,})", r"\n\1", generation
+        ) # overly long section titles are probably not real section titles
+ generation = generation.strip()
+ # Remove LaTeX left margin tag
+ generation = generation.replace("\n* [leftmargin=*]\n", "\n")
+ # Remove lines with markdown headings starting with #, with numerals,
+ # and possibly roman numerals with trailing spaces and newlines
+ generation = re.sub(r"^#+ (?:\.?(?:\d|[ixv])+)*\s*(?:$|\n\s*)", "", generation, flags=re.M)
+ # most likely hallucinated titles
+ lines = generation.split("\n")
+ if lines[-1].startswith("#") and lines[-1].lstrip("#").startswith(" ") and len(lines) > 1:
+ logger.info("Likely hallucinated title at the end of the page: " + lines[-1])
+ generation = "\n".join(lines[:-1])
+ # obvious repetition detection
+ generation = truncate_repetitions(generation)
+ # Reference corrections
+ generation = self.remove_hallucinated_references(generation)
+ # Remove lines starting with asterisks and numbers like "*[1]" and followed by capital letters and periods (ie too long references)
+ generation = re.sub(r"^\* \[\d+\](\s?[A-W]\.+\s?){10,}.*$", "", generation, flags=re.M)
+ # Remove empty brackets after a reference number in brackets. *[12][]ABC will become *[12]ABC
+ generation = re.sub(r"^(\* \[\d+\])\[\](.*)$", r"\1\2", generation, flags=re.M)
+ # Remove single characters before or after 2 new lines
+ generation = re.sub(r"(^\w\n\n|\n\n\w$)", "", generation)
+ # pmc math artifact correction
+ generation = re.sub(
+ r"([\s.,()])_([a-zA-Z0-9])__([a-zA-Z0-9]){1,3}_([\s.,:()])",
+ r"\1\(\2_{\3}\)\4",
+ generation,
+ )
+ generation = re.sub(r"([\s.,\d])_([a-zA-Z0-9])_([\s.,\d;])", r"\1\(\2\)\3", generation)
+ # footnote mistakes
+ generation = re.sub(
+ r"(\nFootnote .*?:) (?:footnotetext|thanks):\W*(.*(?:\n\n|$))",
+ r"\1 \2",
+ generation,
+ )
+ # TODO Come up with footnote formatting inside a table
+ generation = re.sub(r"\[FOOTNOTE:.+?\](.*?)\[ENDFOOTNOTE\]", "", generation)
+ # itemize post processing
+ generation = normalize_list_like_lines(generation)
+
+ if generation.endswith((".", "}")):
+ generation += "\n\n"
+ if re.match(r"[A-Z0-9,;:]$", generation):
+            # add space in case there is a comma or word ending
+ generation += " "
+ elif generation.startswith(("#", "**", "\\begin")):
+ generation = "\n\n" + generation
+ elif generation.split("\n")[-1].startswith(("#", "Figure", "Table")):
+ generation = generation + "\n\n"
+ else:
+ try:
+ last_word = generation.split(" ")[-1]
+ if last_word in nltk.corpus.words.words():
+ generation += " "
+ except LookupError:
+ # add space just in case. Will split words but better than concatenating them
+ generation += " "
+
+ # table corrections
+ generation = self.correct_tables(generation)
+ # Remove optional, empty square brackets after begin{array}
+ generation = generation.replace("\\begin{array}[]{", "\\begin{array}{")
+ # Remove empty or malformed LaTeX tabular blocks with 2 or more columns specified, with spaces and ampersands.
+ generation = re.sub(
+ r"\\begin{tabular}{([clr ]){2,}}\s*[& ]*\s*(\\\\)? \\end{tabular}",
+ "",
+ generation,
+ )
+ # Remove lines containing "S.A.B." one or more times. Was included in Nougat's code.
+ generation = re.sub(r"(\*\*S\. A\. B\.\*\*\n+){2,}", "", generation)
+ # Remove markdown-style headers that are incomplete or empty on multiple lines.
+ generation = re.sub(r"^#+( [\[\d\w])?$", "", generation, flags=re.M)
+ # Remove lines with just one period.
+ generation = re.sub(r"^\.\s*$", "", generation, flags=re.M)
+ # Replace instances of three or more newlines with just two newlines.
+ generation = re.sub(r"\n{3,}", "\n\n", generation)
+ if fix_markdown:
+ return markdown_compatible(generation)
+ else:
+ return generation
+
+ def post_process_generation(
+ self,
+ generation: Union[str, List[str]],
+ fix_markdown: bool = True,
+ num_workers: int = None,
+ ) -> Union[str, List[str]]:
+ """
+ Postprocess a generated text or a list of generated texts.
+
+ This function can be used to perform postprocessing on generated text, such as fixing Markdown formatting.
+
+ Postprocessing is quite slow so it is recommended to use multiprocessing to speed up the process.
+
+ Args:
+ generation (Union[str, List[str]]):
+ The generated text or a list of generated texts.
+ fix_markdown (`bool`, *optional*, defaults to `True`):
+ Whether to perform Markdown formatting fixes.
+ num_workers (`int`, *optional*):
+ Optional number of workers to pass to leverage multiprocessing (postprocessing several texts in
+ parallel).
+
+ Returns:
+ Union[str, List[str]]: The postprocessed text or list of postprocessed texts.
+ """
+ requires_backends(self, ["nltk", "levenshtein"])
+
+ if isinstance(generation, list):
+ if num_workers is not None and isinstance(num_workers, int):
+ with Pool(num_workers) as p:
+ return p.map(partial(self.post_process_single, fix_markdown=fix_markdown), generation)
+ else:
+ return [self.post_process_single(s, fix_markdown=fix_markdown) for s in generation]
+ else:
+ return self.post_process_single(generation, fix_markdown=fix_markdown)
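To tie the post-processing pieces together, a brief sketch; it assumes the optional `nltk` and `python-Levenshtein` backends are installed and uses an invented `raw_output` string in place of real model generations:

```python
# Sketch: cleaning up raw Nougat generations with the tokenizer defined above.
# Requires the optional nltk and Levenshtein backends; raw_output is invented.
from transformers import NougatTokenizerFast

tokenizer = NougatTokenizerFast.from_pretrained("facebook/nougat-base")

raw_output = "(1.1) \\[E=mc^{2}\\]\nSee https://huggingface.co for details."
print(tokenizer.post_process_generation(raw_output, fix_markdown=True))

# For many pages at once, post-processing can be parallelized:
# pages = tokenizer.post_process_generation(list_of_page_strings, num_workers=4)
```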
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d161192d81b0da3d5841da50cfedc4d75394b50
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__init__.py
@@ -0,0 +1,71 @@
+# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {"configuration_qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_qdqbert"] = [
+ "QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "QDQBertForMaskedLM",
+ "QDQBertForMultipleChoice",
+ "QDQBertForNextSentencePrediction",
+ "QDQBertForQuestionAnswering",
+ "QDQBertForSequenceClassification",
+ "QDQBertForTokenClassification",
+ "QDQBertLayer",
+ "QDQBertLMHeadModel",
+ "QDQBertModel",
+ "QDQBertPreTrainedModel",
+ "load_tf_weights_in_qdqbert",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_qdqbert import (
+ QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ QDQBertForMaskedLM,
+ QDQBertForMultipleChoice,
+ QDQBertForNextSentencePrediction,
+ QDQBertForQuestionAnswering,
+ QDQBertForSequenceClassification,
+ QDQBertForTokenClassification,
+ QDQBertLayer,
+ QDQBertLMHeadModel,
+ QDQBertModel,
+ QDQBertPreTrainedModel,
+ load_tf_weights_in_qdqbert,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
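A minimal sketch of what the lazy-import wiring above means in practice: the configuration import always resolves, while the modeling classes are only registered in `_import_structure` when torch is available.

```python
# Sketch: the package's _LazyModule defers the actual submodule imports until
# an attribute is first accessed.
from transformers.models.qdqbert import QDQBertConfig  # always available

config = QDQBertConfig()
print(config.model_type)  # "qdqbert"

# QDQBertModel and friends are only registered when torch is installed
# (and they additionally rely on NVIDIA's pytorch-quantization toolkit):
# from transformers.models.qdqbert import QDQBertModel
```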
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f3f294d86025ac90f05e0cbe34a091a0070e2ba
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/configuration_qdqbert.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/configuration_qdqbert.py
new file mode 100644
index 0000000000000000000000000000000000000000..1efa2ef811ecbe1d09a4a7897ff31b333bce795b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/qdqbert/configuration_qdqbert.py
@@ -0,0 +1,125 @@
+# coding=utf-8
+# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" QDQBERT model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "google-bert/bert-base-uncased": "https://huggingface.co/google-bert/bert-base-uncased/resolve/main/config.json",
+ # QDQBERT models can be loaded from any BERT checkpoint, available at https://huggingface.co/models?filter=bert
+}
+
+
+class QDQBertConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`QDQBertModel`]. It is used to instantiate a
+ QDQBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the BERT
+ [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the QDQBERT model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`QDQBertModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimension of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`QDQBertModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ is_decoder (`bool`, *optional*, defaults to `False`):
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import QDQBertModel, QDQBertConfig
+
+ >>> # Initializing a QDQBERT google-bert/bert-base-uncased style configuration
+ >>> configuration = QDQBertConfig()
+
+ >>> # Initializing a model from the google-bert/bert-base-uncased style configuration
+ >>> model = QDQBertModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "qdqbert"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ use_cache=True,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.type_vocab_size = type_vocab_size
+ self.layer_norm_eps = layer_norm_eps
+ self.use_cache = use_cache
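Beyond the default instantiation shown in the class docstring, the fields above can be overridden; a brief sketch with illustrative (non-checkpoint) sizes:

```python
# Sketch: a scaled-down QDQBERT configuration. The particular values are
# illustrative, not tied to any released checkpoint.
from transformers import QDQBertConfig

config = QDQBertConfig(
    hidden_size=512,          # default 768
    num_hidden_layers=6,      # default 12
    num_attention_heads=8,    # default 12
    intermediate_size=2048,   # default 3072
)
print(config.hidden_size, config.num_attention_heads)
config.save_pretrained("./qdqbert-small")  # writes config.json locally
```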