diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..42e4a23337c20ceae77652f94c7438c8b0d400a1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+
+
+_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
+
+
+if TYPE_CHECKING:
+ from .tokenization_bertweet import BertweetTokenizer
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
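+
+# Usage sketch (illustrative, not part of the upstream module): with this lazy-import
+# setup, `BertweetTokenizer` is only materialized from `tokenization_bertweet` on first
+# access, e.g.:
+#
+#   >>> from transformers.models.bertweet import BertweetTokenizer
+#   >>> BertweetTokenizer.__module__
+#   'transformers.models.bertweet.tokenization_bertweet'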
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c41a1e8ffce9efa4f5b4c4555e8fd7ca457d374d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/tokenization_bertweet.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/tokenization_bertweet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..03507f203d913818a870ceee881468a4fae3a41c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/tokenization_bertweet.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/tokenization_bertweet.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/tokenization_bertweet.py
new file mode 100644
index 0000000000000000000000000000000000000000..74bc040c25b13d070a16f7b976030f81b9a3cbbf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/bertweet/tokenization_bertweet.py
@@ -0,0 +1,782 @@
+# coding=utf-8
+# Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
+# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization classes for BERTweet"""
+
+
+import html
+import os
+import re
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+import regex
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.txt",
+ "merges_file": "bpe.codes",
+}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/vocab.txt",
+ },
+ "merges_file": {
+ "vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/bpe.codes",
+ },
+}
+
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "vinai/bertweet-base": 128,
+}
+
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word.
+
+ Word is represented as tuple of symbols (symbols being variable-length strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+
+ pairs = set(pairs)
+ return pairs
+
+
+class BertweetTokenizer(PreTrainedTokenizer):
+ """
+ Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ normalization (`bool`, *optional*, defaults to `False`):
+ Whether or not to apply a normalization preprocess.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ normalization=False,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ **kwargs,
+ ):
+ try:
+ from emoji import demojize
+
+ self.demojizer = demojize
+ except ImportError:
+ logger.warning(
+ "emoji is not installed, thus not converting emoticons or emojis into text. Install emoji: pip3"
+ " install emoji==0.6.0"
+ )
+ self.demojizer = None
+
+ self.vocab_file = vocab_file
+ self.merges_file = merges_file
+
+ self.encoder = {}
+ self.encoder[str(bos_token)] = 0
+ self.encoder[str(pad_token)] = 1
+ self.encoder[str(eos_token)] = 2
+ self.encoder[str(unk_token)] = 3
+
+ self.add_from_file(vocab_file)
+
+ self.decoder = {v: k for k, v in self.encoder.items()}
+
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ merges = merges_handle.read().split("\n")[:-1]
+ merges = [tuple(merge.split()[:-1]) for merge in merges]
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {}
+
+ self.normalization = normalization
+ self.tweetPreprocessor = TweetTokenizer()
+ self.special_puncts = {"’": "'", "…": "..."}
+
+ super().__init__(
+ normalization=normalization,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ **kwargs,
+ )
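+
+ # Minimal usage sketch (illustrative, not part of the original file); it assumes the
+ # "vinai/bertweet-base" checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP above is
+ # reachable, and that the optional `emoji` package may or may not be installed:
+ #
+ # >>> tokenizer = BertweetTokenizer.from_pretrained("vinai/bertweet-base", normalization=True)
+ # >>> tokenizer.tokenize("@remy check https://example.com")
+ # The result is a list of BPE pieces in which every non-final piece of a word carries
+ # a trailing "@@" continuation marker (see `bpe` below).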
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. A BERTweet sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
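+
+ # Illustrative sketch: with the default special tokens registered in __init__ above
+ # ("<s>" -> 0, "</s>" -> 2), the layouts produced here are
+ #
+ # >>> self.build_inputs_with_special_tokens([5, 6])
+ # [0, 5, 6, 2]
+ # >>> self.build_inputs_with_special_tokens([5, 6], [7, 8])
+ # [0, 5, 6, 2, 2, 7, 8, 2]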
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does
+ not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = "@@ ".join(word)
+ word = word[:-4]
+ self.cache[token] = word
+ return word
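+
+ # Sketch of the output format only; the actual split depends entirely on the merges
+ # loaded from bpe.codes, so the pieces below are hypothetical:
+ #
+ # >>> self.bpe("coronavirus")
+ # 'co@@ ro@@ na@@ virus'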
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ if self.normalization: # Perform Tweet normalization before performing BPE
+ text = self.normalizeTweet(text)
+
+ split_tokens = []
+ words = re.findall(r"\S+\n?", text)
+ for token in words:
+ split_tokens.extend(list(self.bpe(token).split(" ")))
+ return split_tokens
+
+ def normalizeTweet(self, tweet):
+ """
+ Normalize a raw Tweet
+ """
+ for punct in self.special_puncts:
+ tweet = tweet.replace(punct, self.special_puncts[punct])
+
+ tokens = self.tweetPreprocessor.tokenize(tweet)
+ normTweet = " ".join([self.normalizeToken(token) for token in tokens])
+
+ normTweet = (
+ normTweet.replace("cannot ", "can not ")
+ .replace("n't ", " n't ")
+ .replace("n 't ", " n't ")
+ .replace("ca n't", "can't")
+ .replace("ai n't", "ain't")
+ )
+ normTweet = (
+ normTweet.replace("'m ", " 'm ")
+ .replace("'re ", " 're ")
+ .replace("'s ", " 's ")
+ .replace("'ll ", " 'll ")
+ .replace("'d ", " 'd ")
+ .replace("'ve ", " 've ")
+ )
+ normTweet = (
+ normTweet.replace(" p . m .", " p.m.")
+ .replace(" p . m ", " p.m ")
+ .replace(" a . m .", " a.m.")
+ .replace(" a . m ", " a.m ")
+ )
+
+ return " ".join(normTweet.split())
+
+ def normalizeToken(self, token):
+ """
+ Normalize tokens in a Tweet
+ """
+ lowercased_token = token.lower()
+ if token.startswith("@"):
+ return "@USER"
+ elif lowercased_token.startswith("http") or lowercased_token.startswith("www"):
+ return "HTTPURL"
+ elif len(token) == 1:
+ if token in self.special_puncts:
+ return self.special_puncts[token]
+ if self.demojizer is not None:
+ return self.demojizer(token)
+ else:
+ return token
+ else:
+ return token
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace("@@ ", "").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ out_merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
+ copyfile(self.merges_file, out_merge_file)
+
+ return out_vocab_file, out_merge_file
+
+ # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
+ # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
+ # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
+ # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
+ # return ''.join(tokens_generated_so_far)
+
+ def add_from_file(self, f):
+ """
+ Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
+ """
+ if isinstance(f, str):
+ try:
+ with open(f, "r", encoding="utf-8") as fd:
+ self.add_from_file(fd)
+ except FileNotFoundError as fnfe:
+ raise fnfe
+ except UnicodeError:
+ raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
+ return
+
+ lines = f.readlines()
+ for lineTmp in lines:
+ line = lineTmp.strip()
+ idx = line.rfind(" ")
+ if idx == -1:
+ raise ValueError("Incorrect dictionary format, expected ' '")
+ word = line[:idx]
+ self.encoder[word] = len(self.encoder)
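+
+ # Expected on-disk vocabulary format: one "<token> <cnt>" pair per line, as enforced
+ # by the rfind(" ") check above. A hypothetical vocab.txt fragment:
+ #
+ #   @USER 12345
+ #   HTTPURL 6789
+ #   vi@@ 4242
+ #
+ # Each token is appended to self.encoder with the next free id after the special tokens.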
+
+
+# Natural Language Toolkit: Twitter Tokenizer
+#
+# Copyright (C) 2001-2020 NLTK Project
+# Author: Christopher Potts
+# Ewan Klein (modifications)
+# Pierpaolo Pantone <> (modifications)
+# URL: http://nltk.org/
+# For license information, see LICENSE.TXT
+#
+
+
+"""
+Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is this:
+
+1. The tuple regex_strings defines a list of regular expression strings.
+
+2. The regex_strings strings are put, in order, into a compiled regular expression object called word_re.
+
+3. The tokenization is done by word_re.findall(s), where s is the user-supplied string, inside the tokenize() method of
+ the class Tokenizer.
+
+4. When instantiating Tokenizer objects, there is a single option: preserve_case. By default, it is set to True. If it
+ is set to False, then the tokenizer will lowercase everything except for emoticons.
+
+"""
+
+
+######################################################################
+#
+# import regex # https://github.com/nltk/nltk/issues/2409
+# import html
+#
+######################################################################
+# The following strings are components in the regular expression
+# that is used for tokenizing. It's important that phone_number
+# appears first in the final regex (since it can contain whitespace).
+# It also could matter that tags comes after emoticons, due to the
+# possibility of having text like
+#
+# <:| and some text >:)
+#
+# Most importantly, the final element should always be last, since it
+# does a last ditch whitespace-based tokenization of whatever is left.
+
+# ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ?
+
+# This particular element is used in a couple ways, so we define it
+# with a name:
+# docstyle-ignore
+EMOTICONS = r"""
+ (?:
+ [<>]?
+ [:;=8] # eyes
+ [\-o\*\']? # optional nose
+ [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
+ |
+ [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
+ [\-o\*\']? # optional nose
+ [:;=8] # eyes
+ [<>]?
+ |
+ <3 # heart
+ )"""
+
+# URL pattern due to John Gruber, modified by Tom Winzig. See
+# https://gist.github.com/winzig/8894715
+# docstyle-ignore
+URLS = r""" # Capture 1: entire matched URL
+ (?:
+ https?: # URL protocol and colon
+ (?:
+ /{1,3} # 1-3 slashes
+ | # or
+ [a-z0-9%] # Single letter or digit or '%'
+ # (Trying not to match e.g. "URI::Escape")
+ )
+ | # or
+ # looks like domain name followed by a slash:
+ [a-z0-9.\-]+[.]
+ (?:[a-z]{2,13})
+ /
+ )
+ (?: # One or more:
+ [^\s()<>{}\[\]]+ # Run of non-space, non-()<>{}[]
+ | # or
+ \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
+ |
+ \([^\s]+?\) # balanced parens, non-recursive: (...)
+ )+
+ (?: # End with:
+ \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
+ |
+ \([^\s]+?\) # balanced parens, non-recursive: (...)
+ | # or
+ [^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars
+ )
+ | # OR, the following to match naked domains:
+ (?:
+ (?<!@) # not preceded by a @, avoid matching foo@_gmail.com_
+ [a-z0-9]+
+ (?:[.\-][a-z0-9]+)*
+ [.]
+ (?:[a-z]{2,13})
+ \b
+ /?
+ (?!@) # not succeeded by a @,
+ # avoid matching "foo.na" in "foo.na@example.com"
+ )
+"""
+
+# docstyle-ignore
+# The components of the tokenizer:
+REGEXPS = (
+ URLS,
+ # Phone numbers:
+ r"""
+ (?:
+ (?: # (international)
+ \+?[01]
+ [ *\-.\)]*
+ )?
+ (?: # (area code)
+ [\(]?
+ \d{3}
+ [ *\-.\)]*
+ )?
+ \d{3} # exchange
+ [ *\-.\)]*
+ \d{4} # base
+ )""",
+ # ASCII Emoticons
+ EMOTICONS,
+ # HTML tags:
+ r"""<[^>\s]+>""",
+ # ASCII Arrows
+ r"""[\-]+>|<[\-]+""",
+ # Twitter username:
+ r"""(?:@[\w_]+)""",
+ # Twitter hashtags:
+ r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
+ # email addresses
+ r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
+ # docstyle-ignore
+ # Remaining word types:
+ r"""
+ (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes.
+ |
+ (?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals.
+ |
+ (?:[\w_]+) # Words without apostrophes or dashes.
+ |
+ (?:\.(?:\s*\.){1,}) # Ellipsis dots.
+ |
+ (?:\S) # Everything else that isn't whitespace.
+ """,
+)
+
+######################################################################
+# This is the core tokenizing regex:
+
+WORD_RE = regex.compile(r"""(%s)""" % "|".join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE)
+
+# WORD_RE performs poorly on these patterns:
+HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}")
+
+# The emoticon string gets its own regex so that we can preserve case for
+# them as needed:
+EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE)
+
+# These are for regularizing HTML entities to Unicode:
+ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);")
+
+
+######################################################################
+# Functions for converting html entities
+######################################################################
+
+
+def _str_to_unicode(text, encoding=None, errors="strict"):
+ if encoding is None:
+ encoding = "utf-8"
+ if isinstance(text, bytes):
+ return text.decode(encoding, errors)
+ return text
+
+
+def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"):
+ """
+ Remove entities from text by converting them to their corresponding unicode character.
+
+ Args:
+ text:
+ A unicode string or a byte string encoded in the given *encoding* (which defaults to 'utf-8').
+ keep (list):
+ List of entity names which should not be replaced. This supports both numeric entities (`&#nnnn;` and
+ `&#hhhh;`) and named entities (such as `&nbsp;` or `&gt;`).
+ remove_illegal (bool):
+ If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are
+ kept "as is".
+
+ Returns: A unicode string with the entities removed.
+
+ See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py
+
+ Examples:
+
+ ```python
+ >>> from nltk.tokenize.casual import _replace_html_entities
+
+ >>> _replace_html_entities(b"Price: &pound;100")
+ 'Price: \\xa3100'
+
+ >>> print(_replace_html_entities(b"Price: &pound;100"))
+ Price: £100
+ ```"""
+
+ def _convert_entity(match):
+ entity_body = match.group(3)
+ if match.group(1):
+ try:
+ if match.group(2):
+ number = int(entity_body, 16)
+ else:
+ number = int(entity_body, 10)
+ # Numeric character references in the 80-9F range are typically
+ # interpreted by browsers as representing the characters mapped
+ # to bytes 80-9F in the Windows-1252 encoding. For more info
+ # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets
+ if 0x80 <= number <= 0x9F:
+ return bytes((number,)).decode("cp1252")
+ except ValueError:
+ number = None
+ else:
+ if entity_body in keep:
+ return match.group(0)
+ else:
+ number = html.entities.name2codepoint.get(entity_body)
+ if number is not None:
+ try:
+ return chr(number)
+ except (ValueError, OverflowError):
+ pass
+
+ return "" if remove_illegal else match.group(0)
+
+ return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))
+
+
+######################################################################
+
+
+class TweetTokenizer:
+ r"""
+ Examples:
+
+ ```python
+ >>> # Tokenizer for tweets.
+ >>> from nltk.tokenize import TweetTokenizer
+
+ >>> tknzr = TweetTokenizer()
+ >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
+ >>> tknzr.tokenize(s0)
+ ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']
+
+ >>> # Examples using *strip_handles* and *reduce_len parameters*:
+ >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
+ >>> s1 = "@remy: This is waaaaayyyy too much for you!!!!!!"
+ >>> tknzr.tokenize(s1)
+ [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
+ ```"""
+
+ def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
+ self.preserve_case = preserve_case
+ self.reduce_len = reduce_len
+ self.strip_handles = strip_handles
+
+ def tokenize(self, text):
+ """
+ Args:
+ text: str
+
+ Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if
+ `preserve_case=False`
+ """
+ # Fix HTML character entities:
+ text = _replace_html_entities(text)
+ # Remove username handles
+ if self.strip_handles:
+ text = remove_handles(text)
+ # Normalize word lengthening
+ if self.reduce_len:
+ text = reduce_lengthening(text)
+ # Shorten problematic sequences of characters
+ safe_text = HANG_RE.sub(r"\1\1\1", text)
+ # Tokenize:
+ words = WORD_RE.findall(safe_text)
+ # Possibly alter the case, but avoid changing emoticons like :D into :d:
+ if not self.preserve_case:
+ words = [x if EMOTICON_RE.search(x) else x.lower() for x in words]
+ return words
+
+
+######################################################################
+# Normalization Functions
+######################################################################
+
+
+def reduce_lengthening(text):
+ """
+ Replace repeated character sequences of length 3 or greater with sequences of length 3.
+ """
+ pattern = regex.compile(r"(.)\1{2,}")
+ return pattern.sub(r"\1\1\1", text)
+
+
+def remove_handles(text):
+ """
+ Remove Twitter username handles from text.
+ """
+ pattern = regex.compile(
+ r"(?>> from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel
+
+ >>> # Initializing a ChineseCLIPTextConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> configuration = ChineseCLIPTextConfig()
+
+ >>> # Initializing a ChineseCLIPTextModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> model = ChineseCLIPTextModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "chinese_clip_text_model"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ initializer_factor=1.0,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ position_embedding_type="absolute",
+ use_cache=True,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.initializer_factor = initializer_factor
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the vision config dict if we are loading from ChineseCLIPConfig
+ if config_dict.get("model_type") == "chinese_clip":
+ config_dict = config_dict["text_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
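+
+ # Illustrative sketch (assumes the "OFA-Sys/chinese-clip-vit-base-patch16" checkpoint
+ # named in the docstring is reachable): loading the text sub-config from a composite
+ # chinese_clip config goes through the `model_type == "chinese_clip"` branch above.
+ #
+ # >>> text_config = ChineseCLIPTextConfig.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ # >>> text_config.model_type
+ # 'chinese_clip_text_model'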
+
+
+class ChineseCLIPVisionConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate an
+ ChineseCLIP model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the ChineseCLIP
+ [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ projection_dim (`int`, *optional*, defaults to 512):
+ Dimensionality of text and vision projection layers.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 32):
+ The size (resolution) of each patch.
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ initializer_factor (`float`, *optional*, defaults to 1.0):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+ Example:
+ ```python
+ >>> from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel
+
+ >>> # Initializing a ChineseCLIPVisionConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> configuration = ChineseCLIPVisionConfig()
+
+ >>> # Initializing a ChineseCLIPVisionModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> model = ChineseCLIPVisionModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "chinese_clip_vision_model"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ intermediate_size=3072,
+ projection_dim=512,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ num_channels=3,
+ image_size=224,
+ patch_size=32,
+ hidden_act="quick_gelu",
+ layer_norm_eps=1e-5,
+ attention_dropout=0.0,
+ initializer_range=0.02,
+ initializer_factor=1.0,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.projection_dim = projection_dim
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.num_channels = num_channels
+ self.patch_size = patch_size
+ self.image_size = image_size
+ self.initializer_range = initializer_range
+ self.initializer_factor = initializer_factor
+ self.attention_dropout = attention_dropout
+ self.layer_norm_eps = layer_norm_eps
+ self.hidden_act = hidden_act
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the vision config dict if we are loading from ChineseCLIPConfig
+ if config_dict.get("model_type") == "chinese_clip":
+ config_dict = config_dict["vision_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class ChineseCLIPConfig(PretrainedConfig):
+ r"""
+ [`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used
+ to instantiate Chinese-CLIP model according to the specified arguments, defining the text model and vision model
+ configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the
+ Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ text_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`].
+ vision_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`].
+ projection_dim (`int`, *optional*, defaults to 512):
+ Dimensionality of text and vision projection layers.
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
+ The initial value of the *logit_scale* parameter. Default is used as per the original ChineseCLIP
+ implementation.
+ kwargs (*optional*):
+ Dictionary of keyword arguments.
+
+ Example:
+
+ ```python
+ >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel
+
+ >>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> configuration = ChineseCLIPConfig()
+
+ >>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration
+ >>> model = ChineseCLIPModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+
+ >>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig
+
+ >>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration
+ >>> config_text = ChineseCLIPTextConfig()
+ >>> config_vision = ChineseCLIPVisionConfig()
+
+ >>> config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision)
+ ```"""
+
+ model_type = "chinese_clip"
+
+ def __init__(
+ self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
+ ):
+ # If the `_config_dict`s exist, we use them for backward compatibility.
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
+ # of confusion!).
+ text_config_dict = kwargs.pop("text_config_dict", None)
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
+
+ super().__init__(**kwargs)
+
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
+ if text_config_dict is not None:
+ if text_config is None:
+ text_config = {}
+
+ # This is the complete result when using `text_config_dict`.
+ _text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict()
+
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
+ for key, value in _text_config_dict.items():
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ # If specified in `text_config_dict`
+ if key in text_config_dict:
+ message = (
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
+ f'The value `text_config_dict["{key}"]` will be used instead.'
+ )
+ # If inferred from default argument values (just to be super careful)
+ else:
+ message = (
+ f"`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. "
+ f'The value `text_config["{key}"]` will be overridden.'
+ )
+ logger.info(message)
+
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
+ text_config.update(_text_config_dict)
+
+ if vision_config_dict is not None:
+ if vision_config is None:
+ vision_config = {}
+
+ # This is the complete result when using `vision_config_dict`.
+ _vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict()
+ # convert keys to string instead of integer
+ if "id2label" in _vision_config_dict:
+ _vision_config_dict["id2label"] = {
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
+ }
+
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
+ for key, value in _vision_config_dict.items():
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ # If specified in `vision_config_dict`
+ if key in vision_config_dict:
+ message = (
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
+ )
+ # If inferred from default argument values (just to be super careful)
+ else:
+ message = (
+ f"`vision_config_dict` is provided which will be used to initialize "
+ f'`ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
+ )
+ logger.info(message)
+
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
+ vision_config.update(_vision_config_dict)
+
+ if text_config is None:
+ text_config = {}
+ logger.info("`text_config` is `None`. Initializing the `ChineseCLIPTextConfig` with default values.")
+
+ if vision_config is None:
+ vision_config = {}
+ logger.info("`vision_config` is `None`. initializing the `ChineseCLIPVisionConfig` with default values.")
+
+ self.text_config = ChineseCLIPTextConfig(**text_config)
+ self.vision_config = ChineseCLIPVisionConfig(**vision_config)
+
+ self.projection_dim = projection_dim
+ self.logit_scale_init_value = logit_scale_init_value
+ self.initializer_factor = 1.0
+ self.initializer_range = 0.02
+
+ @classmethod
+ def from_text_vision_configs(
+ cls, text_config: ChineseCLIPTextConfig, vision_config: ChineseCLIPVisionConfig, **kwargs
+ ):
+ r"""
+ Instantiate a [`ChineseCLIPConfig`] (or a derived class) from Chinese-CLIP text model configuration and
+ Chinese-CLIP vision model configuration.
+
+ Returns:
+ [`ChineseCLIPConfig`]: An instance of a configuration object
+ """
+
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
+
+
+class ChineseCLIPOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "sequence"}),
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ("attention_mask", {0: "batch", 1: "sequence"}),
+ ]
+ )
+
+ @property
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("logits_per_image", {0: "batch"}),
+ ("logits_per_text", {0: "batch"}),
+ ("text_embeds", {0: "batch"}),
+ ("image_embeds", {0: "batch"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
+
+ def generate_dummy_inputs(
+ self,
+ processor: "ProcessorMixin",
+ batch_size: int = -1,
+ seq_length: int = -1,
+ framework: Optional["TensorType"] = None,
+ ) -> Mapping[str, Any]:
+ text_input_dict = super().generate_dummy_inputs(
+ processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
+ )
+ image_input_dict = super().generate_dummy_inputs(
+ processor.image_processor, batch_size=batch_size, framework=framework
+ )
+ return {**text_input_dict, **image_input_dict}
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 14
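+
+# Illustrative export sketch (not part of the original file; the processor checkpoint is
+# an assumption): `generate_dummy_inputs` above merges the tokenizer features (input_ids,
+# attention_mask, ...) with the image-processor features (pixel_values), matching the
+# `inputs` property defined on this class.
+#
+#   >>> from transformers import ChineseCLIPProcessor
+#   >>> onnx_config = ChineseCLIPOnnxConfig(ChineseCLIPConfig())
+#   >>> processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+#   >>> dummy_inputs = onnx_config.generate_dummy_inputs(processor)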
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..02c4b7b754b295016c23b114213d1dd0353363e1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py
@@ -0,0 +1,134 @@
+# coding=utf-8
+# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+import torch
+
+from transformers import ChineseCLIPConfig, ChineseCLIPModel
+
+
+def copy_attn_layer(hf_attn_layer, pt_weights, prefix):
+ q_proj, k_proj, v_proj = pt_weights[f"{prefix}.in_proj_weight"].chunk(3, dim=0)
+ q_proj_bias, k_proj_bias, v_proj_bias = pt_weights[f"{prefix}.in_proj_bias"].chunk(3, dim=0)
+
+ out_proj_weights = pt_weights[f"{prefix}.out_proj.weight"]
+ out_proj_bias = pt_weights[f"{prefix}.out_proj.bias"]
+
+ hf_attn_layer.q_proj.weight.data = q_proj
+ hf_attn_layer.q_proj.bias.data = q_proj_bias
+
+ hf_attn_layer.k_proj.weight.data = k_proj
+ hf_attn_layer.k_proj.bias.data = k_proj_bias
+
+ hf_attn_layer.v_proj.weight.data = v_proj
+ hf_attn_layer.v_proj.bias.data = v_proj_bias
+
+ hf_attn_layer.out_proj.weight.data = out_proj_weights
+ hf_attn_layer.out_proj.bias.data = out_proj_bias
+
+
+def copy_mlp(hf_mlp, pt_weights, prefix):
+ copy_linear(hf_mlp.fc1, pt_weights, f"{prefix}.c_fc")
+ copy_linear(hf_mlp.fc2, pt_weights, f"{prefix}.c_proj")
+
+
+def copy_linear(hf_linear, pt_weights, prefix):
+ hf_linear.weight.data = pt_weights[f"{prefix}.weight"].data
+ hf_linear.bias.data = pt_weights[f"{prefix}.bias"].data
+
+
+def copy_layer(hf_layer, pt_weights, prefix):
+ # copy layer norms
+ copy_linear(hf_layer.layer_norm1, pt_weights, f"{prefix}.ln_1")
+ copy_linear(hf_layer.layer_norm2, pt_weights, f"{prefix}.ln_2")
+
+ # copy MLP
+ copy_mlp(hf_layer.mlp, pt_weights, f"{prefix}.mlp")
+
+ # copy attn
+ copy_attn_layer(hf_layer.self_attn, pt_weights, f"{prefix}.attn")
+
+
+def copy_layers(hf_layers, pt_weights, prefix):
+ for layer_id, hf_layer in enumerate(hf_layers):
+ copy_layer(hf_layer, pt_weights, f"{prefix}.{layer_id}")
+
+
+def copy_text_model_and_projection(hf_model, pt_weights):
+ # copy projection
+ hf_model.text_projection.weight.data = pt_weights["text_projection"].data.T
+
+ # copy text encoder
+ for name, param in hf_model.text_model.named_parameters():
+ param.data = pt_weights[f"bert.{name}"].data
+
+
+def copy_vision_model_and_projection(hf_model, pt_weights):
+ # copy projection
+ hf_model.visual_projection.weight.data = pt_weights["visual.proj"].data.T
+
+ # copy layer norms
+ copy_linear(hf_model.vision_model.pre_layrnorm, pt_weights, "visual.ln_pre")
+ copy_linear(hf_model.vision_model.post_layernorm, pt_weights, "visual.ln_post")
+
+ # copy embeddings
+ hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_weights["visual.conv1.weight"].data
+ hf_model.vision_model.embeddings.class_embedding.data = pt_weights["visual.class_embedding"].data
+ hf_model.vision_model.embeddings.position_embedding.weight.data = pt_weights["visual.positional_embedding"].data
+
+ # copy encoder
+ copy_layers(hf_model.vision_model.encoder.layers, pt_weights, "visual.transformer.resblocks")
+
+
+@torch.no_grad()
+def convert_chinese_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
+ """
+ Copy/paste/tweak model's weights to transformers design.
+ """
+
+ assert config_path is not None, "Please specify the ChineseCLIP model config of the corresponding model size."
+ config = ChineseCLIPConfig.from_pretrained(config_path)
+
+ hf_model = ChineseCLIPModel(config).eval()
+
+ pt_weights = torch.load(checkpoint_path, map_location="cpu")["state_dict"]
+ pt_weights = {(name[7:] if name.startswith("module.") else name): value for name, value in pt_weights.items()}
+
+ copy_text_model_and_projection(hf_model, pt_weights)
+ copy_vision_model_and_projection(hf_model, pt_weights)
+ hf_model.logit_scale.data = pt_weights["logit_scale"].data
+
+ hf_model.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ help="Path to the output folder storing converted hf PyTorch model.",
+ )
+ parser.add_argument(
+ "--checkpoint_path", default=None, type=str, help="Path to original github format ChineseCLIP checkpoint."
+ )
+ parser.add_argument(
+ "--config_path", default=None, required=True, type=str, help="Path to hf config.json of model to convert."
+ )
+ args = parser.parse_args()
+
+ convert_chinese_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
+ print("The conversion is finished!")
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..09aa4106b718ebf39c793b8325892670af566fe3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/feature_extraction_chinese_clip.py
@@ -0,0 +1,33 @@
+# coding=utf-8
+# Copyright 2021 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for Chinese-CLIP."""
+
+import warnings
+
+from ...utils import logging
+from .image_processing_chinese_clip import ChineseCLIPImageProcessor
+
+
+logger = logging.get_logger(__name__)
+
+
+class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
+ " Please use ChineseCLIPImageProcessor instead.",
+ FutureWarning,
+ )
+ super().__init__(*args, **kwargs)
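+
+# Illustrative note (not part of the original file): the deprecated alias only emits a
+# FutureWarning and otherwise behaves exactly like ChineseCLIPImageProcessor:
+#
+#   >>> import warnings
+#   >>> with warnings.catch_warnings(record=True):
+#   ...     warnings.simplefilter("always")
+#   ...     feature_extractor = ChineseCLIPFeatureExtractor()
+#   >>> isinstance(feature_extractor, ChineseCLIPImageProcessor)
+#   True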
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..60f40272bf92716735f62371506202bf3fdd70cd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/image_processing_chinese_clip.py
@@ -0,0 +1,331 @@
+# coding=utf-8
+# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Chinese-CLIP."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ convert_to_rgb,
+ get_resize_output_image_size,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ OPENAI_CLIP_MEAN,
+ OPENAI_CLIP_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+if is_vision_available():
+ import PIL
+
+
+class ChineseCLIPImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a Chinese-CLIP image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
+ `do_resize` in the `preprocess` method.
+ size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
+ `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
+ method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
+ the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
+ method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
+ Whether to convert the image to RGB.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: bool = True,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"shortest_edge": 224}
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size)
+
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
+ self.do_convert_rgb = do_convert_rgb
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_center_crop",
+ "crop_size",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "do_convert_rgb",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
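+
+ # Illustrative sketch of the defaults set above (no checkpoint needed):
+ #
+ # >>> image_processor = ChineseCLIPImageProcessor()
+ # >>> image_processor.size
+ # {'shortest_edge': 224}
+ # >>> image_processor.crop_size
+ # {'height': 224, 'width': 224}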
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
+ resized to keep the input aspect ratio.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred from the input
+ image.
+ """
+ size = get_size_dict(size, default_to_square=False)
+ output_size = get_resize_output_image_size(
+ image, size=(size["height"], size["width"]), default_to_square=False, input_data_format=input_data_format
+ )
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: int = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: bool = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> PIL.Image.Image:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
+ the longest edge resized to keep the input aspect ratio.
+ resample (`int`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
+ `True`.
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+ Whether to convert the image to RGB.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size)
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+ if do_convert_rgb:
+ images = [convert_to_rgb(image) for image in images]
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_center_crop:
+ images = [
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
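+
+
+# The steps in `preprocess` above can also be applied by hand with the helper functions imported in this
+# module. The sketch below is illustrative only: the dummy image, target size and rescale factor are
+# assumptions of this example, not defaults read from the class.
+def _manual_pipeline_sketch() -> np.ndarray:
+    dummy = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # HWC uint8 image
+    image = to_numpy_array(dummy)
+    input_data_format = infer_channel_dimension_format(image)  # ChannelDimension.LAST for this dummy image
+    # Resize, rescale to [0, 1] and move channels first, mirroring the order used in `preprocess`.
+    image = resize(image, size=(224, 224), resample=PILImageResampling.BICUBIC, input_data_format=input_data_format)
+    image = image * (1 / 255.0)
+    return to_channel_dimension_format(image, ChannelDimension.FIRST, input_channel_dim=input_data_format)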
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..a16fb081b1935769dee60909d3d9314693fcb207
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/modeling_chinese_clip.py
@@ -0,0 +1,1564 @@
+# coding=utf-8
+# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Chinese-CLIP model."""
+
+
+import math
+from dataclasses import dataclass
+from typing import Any, List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPooling,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16"
+_CONFIG_FOR_DOC = "ChineseCLIPConfig"
+
+CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "OFA-Sys/chinese-clip-vit-base-patch16",
+ # See all Chinese-CLIP models at https://huggingface.co/models?filter=chinese_clip
+]
+
+
+# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
+# Copied from transformers.models.clip.modeling_clip.contrastive_loss
+def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
+
+
+def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor:
+ caption_loss = contrastive_loss(similarity)
+ image_loss = contrastive_loss(similarity.t())
+ return (caption_loss + image_loss) / 2.0
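+
+
+# Illustrative sketch of `chinese_clip_loss`: for a batch of N matched image-text pairs the ground-truth
+# entries of the similarity matrix sit on the diagonal, so each row (and each column) is a classification
+# problem whose correct class is its own index. The numbers below are made up for the example.
+def _contrastive_loss_sketch() -> torch.Tensor:
+    # A strongly diagonal similarity matrix gives a loss close to zero in both directions.
+    similarity = torch.full((4, 4), -10.0) + 20.0 * torch.eye(4)
+    return chinese_clip_loss(similarity)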
+
+
+@dataclass
+class ChineseCLIPOutput(ModelOutput):
+ """
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for image-text similarity.
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
+ similarity scores.
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
+ similarity scores.
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The text embeddings obtained by applying the projection layer to the pooled output of
+ [`ChineseCLIPTextModel`].
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The image embeddings obtained by applying the projection layer to the pooled output of
+ [`ChineseCLIPVisionModel`].
+ text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
+ The output of the [`ChineseCLIPTextModel`].
+ vision_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
+ The output of the [`ChineseCLIPVisionModel`].
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits_per_image: torch.FloatTensor = None
+ logits_per_text: torch.FloatTensor = None
+ text_embeds: torch.FloatTensor = None
+ image_embeds: torch.FloatTensor = None
+ text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
+ vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText
+class ChineseCLIPTextEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values_length: int = 0,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+ # If token_type_ids is not provided, fall back to the all-zeros buffer registered in the constructor. This is
+ # what usually happens when token_type_ids are auto-generated, and the registered buffer lets users trace the
+ # model without passing token_type_ids, solving issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
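+
+
+# Shape sketch for the embeddings above: word, token type and (absolute) position embeddings are summed and
+# then passed through LayerNorm and dropout. The default-constructed config is only for illustration; real
+# usage takes the config of a pretrained checkpoint.
+def _text_embeddings_shape_sketch() -> torch.Size:
+    config = ChineseCLIPTextConfig()
+    embeddings = ChineseCLIPTextEmbeddings(config).eval()
+    input_ids = torch.tensor([[1, 2, 3, 4]])  # (batch_size=1, seq_length=4); ids must be < config.vocab_size
+    # The result has shape (batch_size, seq_length, config.hidden_size).
+    return embeddings(input_ids=input_ids).shape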
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->ChineseCLIP
+class ChineseCLIPVisionEmbeddings(nn.Module):
+ def __init__(self, config: ChineseCLIPVisionConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
+
+ self.patch_embedding = nn.Conv2d(
+ in_channels=config.num_channels,
+ out_channels=self.embed_dim,
+ kernel_size=self.patch_size,
+ stride=self.patch_size,
+ bias=False,
+ )
+
+ self.num_patches = (self.image_size // self.patch_size) ** 2
+ self.num_positions = self.num_patches + 1
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
+
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
+ batch_size = pixel_values.shape[0]
+ target_dtype = self.patch_embedding.weight.dtype
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+ embeddings = embeddings + self.position_embedding(self.position_ids)
+ return embeddings
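+
+
+# Shape sketch for the vision embeddings above: the image is cut into (image_size // patch_size) ** 2 patches,
+# and a learned class token is prepended, so the sequence length is num_patches + 1. The default-constructed
+# config is only for illustration.
+def _vision_embeddings_shape_sketch() -> torch.Size:
+    config = ChineseCLIPVisionConfig()
+    embeddings = ChineseCLIPVisionEmbeddings(config)
+    pixel_values = torch.randn(2, config.num_channels, config.image_size, config.image_size)
+    # Expected shape: (2, (config.image_size // config.patch_size) ** 2 + 1, config.hidden_size).
+    return embeddings(pixel_values).shape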
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText
+class ChineseCLIPTextSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the ChineseCLIPTextModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
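+
+
+# Small sketch of the relative-position indexing used above for the "relative_key"/"relative_key_query" modes:
+# the distance between query position l and key position r is shifted by max_position_embeddings - 1 before the
+# `distance_embedding` lookup, so that negative distances map to valid, non-negative indices.
+def _relative_distance_sketch(query_length: int = 3, key_length: int = 3, max_position_embeddings: int = 8) -> torch.Tensor:
+    position_ids_l = torch.arange(query_length, dtype=torch.long).view(-1, 1)
+    position_ids_r = torch.arange(key_length, dtype=torch.long).view(1, -1)
+    # distance has shape (query_length, key_length) with values in [-(key_length - 1), query_length - 1].
+    distance = position_ids_l - position_ids_r
+    return distance + max_position_embeddings - 1  # every index is now >= 0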
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText
+class ChineseCLIPTextSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText
+class ChineseCLIPTextAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = ChineseCLIPTextSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class ChineseCLIPVisionAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = config.attention_dropout
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scale
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights has to be reshaped
+ # twice and reused in the following computation
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText
+class ChineseCLIPTextIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText
+class ChineseCLIPTextOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision
+class ChineseCLIPVisionMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText
+class ChineseCLIPTextLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = ChineseCLIPTextAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute")
+ self.intermediate = ChineseCLIPTextIntermediate(config)
+ self.output = ChineseCLIPTextOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
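+
+
+# Sketch of what `apply_chunking_to_forward` does in the layer above: the feed-forward chunk is applied to
+# slices along the sequence dimension (`seq_len_dim = 1`) and the slices are concatenated again, which matches
+# running the chunk on the full sequence at once but with lower peak memory. The linear layer below is a
+# stand-in for the intermediate/output sub-modules.
+def _chunked_feed_forward_sketch() -> bool:
+    hidden_states = torch.randn(2, 10, 16)  # (batch, seq_len, hidden)
+    projection = nn.Linear(16, 16)
+
+    def feed_forward_chunk(chunk):
+        return projection(chunk)
+
+    chunked = apply_chunking_to_forward(feed_forward_chunk, 5, 1, hidden_states)  # chunk_size=5 along dim 1
+    return torch.allclose(chunked, feed_forward_chunk(hidden_states), atol=1e-6)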
+
+
+class ChineseCLIPVisionLayer(nn.Module):
+ def __init__(self, config: ChineseCLIPConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = ChineseCLIPVisionAttention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = ChineseCLIPVisionMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText
+class ChineseCLIPTextPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class ChineseCLIPPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ChineseCLIPConfig
+ base_model_prefix = "chinese_clip"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_factor
+ if isinstance(module, ChineseCLIPVisionEmbeddings):
+ factor = self.config.initializer_factor
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
+ elif isinstance(module, ChineseCLIPTextEmbeddings):
+ nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range)
+ nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range)
+ nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range)
+ for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]:
+ if embedding.padding_idx is not None:
+ embedding.weight.data[embedding.padding_idx].zero_()
+ elif isinstance(module, ChineseCLIPVisionAttention):
+ factor = self.config.initializer_factor
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
+ out_proj_std = (module.embed_dim**-0.5) * factor
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
+ elif isinstance(module, ChineseCLIPVisionMLP):
+ factor = self.config.initializer_factor
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
+ nn.init.normal_(module.fc1.weight, std=fc_std)
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
+ elif isinstance(module, ChineseCLIPModel):
+ nn.init.normal_(
+ module.text_projection.weight,
+ std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
+ )
+ nn.init.normal_(
+ module.visual_projection.weight,
+ std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
+ )
+
+ if isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+
+CHINESE_CLIP_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+CHINESE_CLIP_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText
+class ChineseCLIPTextEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class ChineseCLIPVisionEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
+ [`ChineseCLIPVisionLayer`].
+
+ Args:
+ config: ChineseCLIPConfig
+ """
+
+ def __init__(self, config: ChineseCLIPConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Optionally, instead of passing `pixel_values` through the embedding layer, you can directly pass an embedded
+ representation. This is useful if you want more control over how `pixel_values` are converted into patch
+ embeddings than the model's internal embedding layer provides.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class ChineseCLIPVisionTransformer(nn.Module):
+ def __init__(self, config: ChineseCLIPVisionConfig):
+ super().__init__()
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = ChineseCLIPVisionEmbeddings(config)
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+ self.encoder = ChineseCLIPVisionEncoder(config)
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values)
+ hidden_states = self.pre_layrnorm(hidden_states)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ pooled_output = last_hidden_state[:, 0, :]
+ pooled_output = self.post_layernorm(pooled_output)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The text model from CHINESE_CLIP without any head or projection on top.",
+ CHINESE_CLIP_START_DOCSTRING,
+)
+class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
+ and `add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass.
+ """
+
+ config_class = ChineseCLIPTextConfig
+
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = ChineseCLIPTextEmbeddings(config)
+ self.encoder = ChineseCLIPTextEncoder(config)
+
+ self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. `heads_to_prune` is a dict of {layer_num: list of heads to prune in this layer}. See
+ the base class `PreTrainedModel` for details.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """The vision model from CHINESE_CLIP without any head or projection on top.""",
+ CHINESE_CLIP_START_DOCSTRING,
+)
+class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel):
+ config_class = ChineseCLIPVisionConfig
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: ChineseCLIPVisionConfig):
+ super().__init__(config)
+ self.vision_model = ChineseCLIPVisionTransformer(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.vision_model.embeddings.patch_embedding
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel
+
+ >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ return self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+@add_start_docstrings(CHINESE_CLIP_START_DOCSTRING)
+class ChineseCLIPModel(ChineseCLIPPreTrainedModel):
+ config_class = ChineseCLIPConfig
+
+ def __init__(self, config: ChineseCLIPConfig):
+ super().__init__(config)
+
+ if not isinstance(config.text_config, ChineseCLIPTextConfig):
+ raise ValueError(
+ "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.vision_config, ChineseCLIPVisionConfig):
+ raise ValueError(
+ "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type"
+ f" {type(config.vision_config)}."
+ )
+
+ text_config = config.text_config
+ vision_config = config.vision_config
+
+ self.projection_dim = config.projection_dim
+ self.text_embed_dim = text_config.hidden_size
+ self.vision_embed_dim = vision_config.hidden_size
+
+ self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False)
+ self.vision_model = ChineseCLIPVisionTransformer(vision_config)
+
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING)
+ def get_text_features(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
+ applying the projection layer to the final [CLS] hidden state of the text Transformer.
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ChineseCLIPModel
+
+ >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt")
+ >>> text_features = model.get_text_features(**inputs)
+ >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)
+ ```"""
+ # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = text_outputs[0][:, 0, :]
+ text_features = self.text_projection(pooled_output)
+
+ return text_features
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING)
+ def get_image_features(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
+ applying the projection layer to the final [CLS] hidden state of the vision Transformer.
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, ChineseCLIPModel
+
+ >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> image_features = model.get_image_features(**inputs)
+ >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)
+ ```"""
+ # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = vision_outputs[1] # pooled_output
+ image_features = self.visual_projection(pooled_output)
+
+ return image_features
+
+ @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ChineseCLIPOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, ChineseCLIPModel
+
+ >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+
+ >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True)
+
+ >>> outputs = model(**inputs)
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
+ ```"""
+ # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ image_embeds = vision_outputs[1]
+ image_embeds = self.visual_projection(image_embeds)
+
+ text_embeds = text_outputs[0][:, 0, :]
+ text_embeds = self.text_projection(text_embeds)
+
+ # normalized features
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
+
+ # cosine similarity as logits
+ logit_scale = self.logit_scale.exp()
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
+ logits_per_image = logits_per_text.t()
+
+ loss = None
+ if return_loss:
+ loss = chinese_clip_loss(logits_per_text)
+
+ if not return_dict:
+ # fix the None pooled_output of text_outputs to conform with the dict output format
+ pooled_output = text_outputs[1]
+ if pooled_output is None:
+ text_outputs = (text_outputs[0],) + text_outputs[2:]
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return ChineseCLIPOutput(
+ loss=loss,
+ logits_per_image=logits_per_image,
+ logits_per_text=logits_per_text,
+ text_embeds=text_embeds,
+ image_embeds=image_embeds,
+ text_model_output=text_outputs,
+ vision_model_output=vision_outputs,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..832f44102abf32e7a5cb0b7f04cda0faea80ded0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/chinese_clip/processing_chinese_clip.py
@@ -0,0 +1,142 @@
+# coding=utf-8
+# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Image/Text processor class for Chinese-CLIP
+"""
+
+import warnings
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding
+
+
+class ChineseCLIPProcessor(ProcessorMixin):
+ r"""
+ Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a
+ single processor.
+
+ [`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`].
+ See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`ChineseCLIPImageProcessor`], *optional*):
+ The image processor is a required input.
+ tokenizer ([`BertTokenizerFast`], *optional*):
+ The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "ChineseCLIPImageProcessor"
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
+
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+ feature_extractor = None
+ if "feature_extractor" in kwargs:
+ warnings.warn(
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+ " instead.",
+ FutureWarning,
+ )
+ feature_extractor = kwargs.pop("feature_extractor")
+
+ image_processor = image_processor if image_processor is not None else feature_extractor
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+
+ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
+ """
+ Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+ and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
+ ChineseCLIPImageProcessor's [`~ChineseCLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to
+ the docstring of the above two methods for more information.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+ tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
+ number of channels, H and W are image height and width.
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors of a particular framework. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return NumPy `np.ndarray` objects.
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+ Returns:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+ `None`).
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
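+
+ Example (a minimal usage sketch, assuming the `OFA-Sys/chinese-clip-vit-base-patch16` checkpoint and a `PIL.Image` object named `image`):
+
+ ```python
+ >>> from transformers import ChineseCLIPProcessor
+
+ >>> processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
+ >>> batch = processor(text=["皮卡丘"], images=image, return_tensors="pt", padding=True)
+ >>> # batch contains input_ids/token_type_ids/attention_mask from the tokenizer and pixel_values from the image processor
+ ```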
+ """
+
+ if text is None and images is None:
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
+
+ if text is not None:
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
+
+ if images is not None:
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
+
+ if text is not None and images is not None:
+ encoding["pixel_values"] = image_features.pixel_values
+ return encoding
+ elif text is not None:
+ return encoding
+ else:
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+ @property
+ def feature_extractor_class(self):
+ warnings.warn(
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+ FutureWarning,
+ )
+ return self.image_processor_class
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..567be97b7cd8631e71367e713dc2f0ef23bd76f5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py
@@ -0,0 +1,56 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
+ "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
+ "processing_mctct": ["MCTCTProcessor"],
+}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mctct"] = [
+ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MCTCTForCTC",
+ "MCTCTModel",
+ "MCTCTPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
+ from .feature_extraction_mctct import MCTCTFeatureExtractor
+ from .processing_mctct import MCTCTProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b8bd48a6c8288a70754d41f40492aa1eddb3048
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ad53f6b9a0580746629e135e365104ac734de14
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..010445cfbdbbcd88de81831c5f1b66be1b6f79b5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f876e4378f000b655fe440419fe81dc9094926bd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e897e753197990286fbaa8e52fd425cae2c62f2b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d4eab0d3f3d4a1b6679585c119b013bb45be0e5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py
@@ -0,0 +1,186 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""M-CTC-T model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
+ # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
+}
+
+
+class MCTCTConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an
+ M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the M-CTC-T
+ [speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 8065):
+ Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`MCTCTModel`].
+ hidden_size (`int`, *optional*, defaults to 1536):
+ Dimension of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 36):
+ Number of hidden layers in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 6144):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ attention_head_dim (`int`, *optional*, defaults to 384):
+ Dimensions of each attention head for each attention layer in the Transformer encoder.
+ max_position_embeddings (`int`, *optional*, defaults to 920):
+ The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction).
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ layerdrop (`float`, *optional*, defaults to 0.3):
+ The probability of dropping an encoder layer during training. The default 0.3 value is used in the original
+ implementation.
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.3):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.3):
+ The dropout ratio for the attention probabilities.
+ pad_token_id (`int`, *optional*, defaults to 1):
+ The tokenizer index of the pad token.
+ bos_token_id (`int`, *optional*, defaults to 0):
+ The tokenizer index of the bos token.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ The tokenizer index of the eos token.
+ conv_glu_dim (`int`, *optional*, defaults to 1):
+ The dimension of the output of the `Conv1dSubsampler` layer on which GLU is applied. Though the original
+ Flashlight code uses the value of 2, here it's adapted to 1 due to transposition differences.
+ conv_dropout (`float`, *optional*, defaults to 0.3):
+ The dropout probability applied inside the `Conv1dSubsampler` layer during training.
+ num_conv_layers (`int`, *optional*, defaults to 1):
+ Number of convolution layers before applying transformer encoder layers.
+ conv_kernel (`Sequence[int]`, *optional*, defaults to `(7,)`):
+ The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal
+ to `num_conv_layers`.
+ conv_stride (`Sequence[int]`, *optional*, defaults to `(3,)`):
+ The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be equal
+ to `num_conv_layers`.
+ input_feat_per_channel (`int`, *optional*, defaults to 80):
+ Feature dimensions of the channels of the input to the Conv1D layer.
+ input_channels (`int`, *optional*, defaults to 1):
+ Number of input channels of the input to the Conv1D layer.
+ conv_channels (`List[int]`, *optional*):
+ Channel sizes of intermediate Conv1D layers.
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
+ instance of [`MCTCTForCTC`].
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
+ of [`MCTCTForCTC`].
+
+ Example:
+
+ ```python
+ >>> from transformers import MCTCTConfig, MCTCTModel
+
+ >>> # Initializing a M-CTC-T mctct-large style configuration
+ >>> configuration = MCTCTConfig()
+
+ >>> # Initializing a model (with random weights) from the mctct-large style configuration
+ >>> model = MCTCTModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "mctct"
+
+ def __init__(
+ self,
+ vocab_size=8065,
+ hidden_size=1536,
+ num_hidden_layers=36,
+ intermediate_size=6144,
+ num_attention_heads=4,
+ attention_head_dim=384,
+ max_position_embeddings=920,
+ layer_norm_eps=1e-5,
+ layerdrop=0.3,
+ hidden_act="relu",
+ initializer_range=0.02,
+ hidden_dropout_prob=0.3,
+ attention_probs_dropout_prob=0.3,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ conv_glu_dim=1,
+ conv_dropout=0.3,
+ num_conv_layers=1,
+ conv_kernel=(7,),
+ conv_stride=(3,),
+ input_feat_per_channel=80,
+ input_channels=1,
+ conv_channels=None,
+ ctc_loss_reduction="sum",
+ ctc_zero_infinity=False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.intermediate_size = intermediate_size
+ self.num_attention_heads = num_attention_heads
+ self.attention_head_dim = attention_head_dim
+ self.max_position_embeddings = max_position_embeddings
+ self.layer_norm_eps = layer_norm_eps
+ self.layerdrop = layerdrop
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.pad_token_id = pad_token_id
+ self.bos_token_id = bos_token_id
+ self.eos_token_id = eos_token_id
+ self.conv_glu_dim = conv_glu_dim
+ self.conv_dropout = conv_dropout
+ self.num_conv_layers = num_conv_layers
+ self.input_feat_per_channel = input_feat_per_channel
+ self.input_channels = input_channels
+ self.conv_channels = conv_channels
+ self.ctc_loss_reduction = ctc_loss_reduction
+ self.ctc_zero_infinity = ctc_zero_infinity
+
+ # prevents config testing fail with exporting to json
+ self.conv_kernel = list(conv_kernel)
+ self.conv_stride = list(conv_stride)
+
+ if len(self.conv_kernel) != self.num_conv_layers:
+ raise ValueError(
+ "Configuration for convolutional module is incorrect. "
+ "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
+ f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
+ f"`config.num_conv_layers = {self.num_conv_layers}`."
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1e17c4b12f91dc25284e30a70388137e52ab82b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py
@@ -0,0 +1,288 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Feature extractor class for M-CTC-T
+"""
+
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
+from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ....feature_extraction_utils import BatchFeature
+from ....file_utils import PaddingStrategy, TensorType
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MCTCTFeatureExtractor(SequenceFeatureExtractor):
+ r"""
+ Constructs a M-CTC-T feature extractor.
+
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
+ most of the main methods. Users should refer to this superclass for more information regarding those methods. This
+ code has been adapted from Flashlight's C++ code. For more information about the implementation, one can refer to
+ this [notebook](https://colab.research.google.com/drive/1GLtINkkhzms-IsdcGy_-tVCkv0qNF-Gt#scrollTo=pMCRGMmUC_an)
+ that walks the user step-by-step through the implementation.
+
+ Args:
+ feature_size (`int`, defaults to 80):
+ The feature dimension of the extracted features. This is the number of mel-frequency bins.
+ sampling_rate (`int`, defaults to 16000):
+ The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
+ padding_value (`float`, defaults to 0.0):
+ The value that is used to fill the padding values.
+ hop_length (`int`, defaults to 10):
+ Number of audio samples between windows. Otherwise referred to as "shift" in many papers.
+ win_length (`int`, defaults to 25):
+ Number of milliseconds per window.
+ win_function (`str`, defaults to `"hamming_window"`):
+ Name for the window function used for windowing, must be accessible via `torch.{win_function}`
+ frame_signal_scale (`float`, defaults to 32768.0):
+ Constant multiplied in creating the frames before applying DFT.
+ preemphasis_coeff (`float`, defaults to 0.97):
+ Constant multiplied in applying Pre-emphasis before DFT.
+ mel_floor (`float`, defaults to 1.0):
+ Minimum value of mel frequency banks.
+ normalize_means (`bool`, *optional*, defaults to `True`):
+ Whether or not to zero-mean normalize the extracted features.
+ normalize_vars (`bool`, *optional*, defaults to `True`):
+ Whether or not to unit-variance normalize the extracted features.
+ """
+
+ model_input_names = ["input_features", "attention_mask"]
+
+ def __init__(
+ self,
+ feature_size=80,
+ sampling_rate=16000,
+ padding_value=0.0,
+ hop_length=10,
+ win_length=25,
+ win_function="hamming_window",
+ frame_signal_scale=32768.0,
+ preemphasis_coeff=0.97,
+ mel_floor=1.0,
+ normalize_means=True,
+ normalize_vars=True,
+ return_attention_mask=False,
+ **kwargs,
+ ):
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
+
+ self.feature_size = feature_size
+ self.sampling_rate = sampling_rate
+ self.padding_value = padding_value
+ self.hop_length = hop_length
+ self.win_length = win_length
+ self.frame_signal_scale = frame_signal_scale
+ self.preemphasis_coeff = preemphasis_coeff
+ self.mel_floor = mel_floor
+ self.normalize_means = normalize_means
+ self.normalize_vars = normalize_vars
+ self.win_function = win_function
+ self.return_attention_mask = return_attention_mask
+
+ self.sample_size = win_length * sampling_rate // 1000
+ self.sample_stride = hop_length * sampling_rate // 1000
+
+ self.n_fft = optimal_fft_length(self.sample_size)
+ self.n_freqs = (self.n_fft // 2) + 1
+
+ def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
+ """
+ Extracts MFSC Features for one waveform vector (unbatched). Adapted from Flashlight's C++ MFSC code.
+ """
+ if self.win_function == "hamming_window":
+ window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
+ else:
+ window = window_function(window_length=self.sample_size, name=self.win_function)
+
+ fbanks = mel_filter_bank(
+ num_frequency_bins=self.n_freqs,
+ num_mel_filters=self.feature_size,
+ min_frequency=0.0,
+ max_frequency=self.sampling_rate / 2.0,
+ sampling_rate=self.sampling_rate,
+ )
+
+ msfc_features = spectrogram(
+ one_waveform * self.frame_signal_scale,
+ window=window,
+ frame_length=self.sample_size,
+ hop_length=self.sample_stride,
+ fft_length=self.n_fft,
+ center=False,
+ preemphasis=self.preemphasis_coeff,
+ mel_filters=fbanks,
+ mel_floor=self.mel_floor,
+ log_mel="log",
+ )
+ return msfc_features.T
+
+ def _normalize_one(self, x, input_length, padding_value):
+ # make sure we normalize float32 arrays
+ if self.normalize_means:
+ mean = x[:input_length].mean(axis=0)
+ x = np.subtract(x, mean)
+ if self.normalize_vars:
+ std = x[:input_length].std(axis=0)
+ x = np.divide(x, std)
+
+ if input_length < x.shape[0]:
+ x[input_length:] = padding_value
+
+ # make sure array is in float32
+ x = x.astype(np.float32)
+
+ return x
+
+ def normalize(
+ self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
+ ) -> List[np.ndarray]:
+ lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
+ return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
+
+ def __call__(
+ self,
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+ padding: Union[bool, str, PaddingStrategy] = False,
+ max_length: Optional[int] = None,
+ truncation: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ sampling_rate: Optional[int] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Main method to featurize and prepare one or several sequence(s) for the model. It returns the
+ log-mel spectrogram of the input audio, as implemented in the original Flashlight MFSC feature extraction code.
+
+ Args:
+ raw_speech (`torch.Tensor`, `np.ndarray`, `List[float]`, `List[torch.Tensor]`, `List[np.ndarray]`, `List[List[float]]`):
+ The sequence or batch of sequences to be padded. Each sequence can be a tensor, a numpy array, a list
+ of float values, a list of tensors, a list of numpy arrays or a list of list of float values. Must be
+ mono channel audio, not stereo, i.e. single float per timestep.
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ truncation (`bool`):
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific feature_extractor's default.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ sampling_rate (`int`, *optional*):
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+ `sampling_rate` at the forward call to prevent silent errors.
+ padding_value (`float`, defaults to 0.0):
+ The value that is used to fill the padding values / padding vectors.
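+
+ Example (a minimal usage sketch, assuming the `speechbrain/m-ctc-t-large` checkpoint and a 16 kHz mono waveform):
+
+ ```python
+ >>> import numpy as np
+ >>> from transformers import MCTCTFeatureExtractor
+
+ >>> feature_extractor = MCTCTFeatureExtractor.from_pretrained("speechbrain/m-ctc-t-large")
+ >>> waveform = np.random.randn(16000).astype(np.float32)  # roughly one second of audio at 16 kHz
+ >>> inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
+ >>> input_features = inputs["input_features"]  # shape: (batch, frames, feature_size)
+ ```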
+ """
+
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
+ f" {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+ if is_batched_numpy and len(raw_speech.shape) > 2:
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+ is_batched = is_batched_numpy or (
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+ )
+
+ if is_batched:
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+ raw_speech = raw_speech.astype(np.float32)
+
+ # always return batch
+ if not is_batched:
+ raw_speech = [raw_speech]
+
+ # extract fbank features
+ features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
+
+ # convert into correct format for padding
+ encoded_inputs = BatchFeature({"input_features": features})
+
+ padded_inputs = self.pad(
+ encoded_inputs,
+ padding=padding,
+ max_length=max_length,
+ truncation=truncation,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=True,
+ **kwargs,
+ )
+ # make sure list is in array format
+ input_features = padded_inputs.get("input_features")
+ if isinstance(input_features[0], list):
+ padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
+
+ attention_mask = padded_inputs.get("attention_mask")
+ if attention_mask is not None:
+ padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
+
+ if self.normalize_means or self.normalize_vars:
+ attention_mask = (
+ np.array(attention_mask, dtype=np.int32)
+ if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
+ and padding
+ else None
+ )
+ padded_inputs["input_features"] = self.normalize(
+ padded_inputs["input_features"], attention_mask=attention_mask
+ )
+
+ if return_tensors is not None:
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
+
+ return padded_inputs
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb3186c9dd37b8d69eb6bf18186e9258d9ffc306
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py
@@ -0,0 +1,795 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch M-CTC-T model."""
+
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ....activations import ACT2FN
+from ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
+from ....integrations.deepspeed import is_deepspeed_zero3_enabled
+from ....modeling_attn_mask_utils import _prepare_4d_attention_mask
+from ....modeling_outputs import BaseModelOutput, CausalLMOutput
+from ....modeling_utils import (
+ PreTrainedModel,
+ apply_chunking_to_forward,
+ find_pruneable_heads_and_indices,
+ prune_linear_layer,
+)
+from ....utils import logging
+from .configuration_mctct import MCTCTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_HIDDEN_STATES_START_POSITION = 1
+
+_CONFIG_FOR_DOC = "MCTCTConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "speechbrain/m-ctc-t-large"
+_EXPECTED_OUTPUT_SHAPE = [1, 195, 1536]
+
+# CTC docstring
+_CTC_EXPECTED_OUTPUT = '"Mr. Quilter is the apostle of the middle classes, and we\'re glad to welcome his gospel."'
+_CTC_EXPECTED_LOSS = 1885.65
+
+
+MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "speechbrain/m-ctc-t-large",
+ # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
+]
+
+
+class MCTCTConv1dSubsampler(nn.Module):
+ """
+ Convolutional subsampler: a stack of 1D convolutions (along the temporal dimension) followed by non-linear activation
+ via gated linear units (https://arxiv.org/abs/1911.08460)
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.glu_dim = config.conv_glu_dim
+
+ self.dropout = nn.Dropout(config.conv_dropout)
+
+ self.num_layers = config.num_conv_layers
+ self.in_channels = config.input_feat_per_channel * config.input_channels
+
+ if self.num_layers > 1:
+ if config.conv_channels is None:
+ raise ValueError(
+ "Need to specify `conv_channels` configuration in `MCTCTConfig` to use multiple convolution"
+ " layers."
+ )
+
+ self.mid_channels = config.conv_channels
+ else:
+ self.mid_channels = None
+
+ self.out_channels = config.hidden_size * 2 # considering GLU halving
+ self.kernel_size = config.conv_kernel
+ self.stride = config.conv_stride
+
+ # NOTE: MCTCT by construction only uses one convolution kernel. I've made this flexible to allow for
+ # multiple layers of convolutions, but not sure if this model definition should just restrict it
+ # to one layer. This becomes especially relevant when considering the padding computed in the first line of forward().
+ self.conv_layers = nn.ModuleList(
+ nn.Conv1d(
+ self.in_channels if i == 0 else self.mid_channels[i],
+ self.mid_channels[i] if i < self.num_layers - 1 else self.out_channels,
+ kernel_size=k,
+ stride=self.stride[i],
+ padding="valid",
+ )
+ for i, k in enumerate(self.kernel_size)
+ )
+
+ def forward(self, input_features):
+ # NOTE: in reference to the NOTE in __init__, right now it just calculates padding as if
+ # there will be just one conv layer.
+ padding = sum([size // 2 for size in self.kernel_size]) # (7, 7) -> (3, 3)
+
+ input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), "constant", 0)
+ hidden_states = input_features.transpose(1, 2).contiguous() # -> Batch x Frame x Time
+ for conv in self.conv_layers:
+ hidden_states = conv(hidden_states)
+ hidden_states = nn.functional.glu(hidden_states, dim=self.glu_dim)
+ hidden_states = self.dropout(hidden_states)
+
+ hidden_states = hidden_states.transpose(1, 2).contiguous() # -> Batch x Time x Frame
+ return hidden_states
+
+
+class MCTCTEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ # self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.LayerNorm = MCTCTLayerNorm()
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids",
+ torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
+ persistent=False,
+ )
+
+ def forward(
+ self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
+ ):
+ input_shape = input_features.size() if input_features is not None else inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+ # Set token_type_ids to the registered buffer defined in the constructor (all zeros) when it is not passed.
+ # The registered buffer helps users trace the model without passing token_type_ids and solves issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_features)
+
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class MCTCTSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = config.attention_head_dim
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def reshape_fortran(self, x, shape):
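+ # Emulates a Fortran (column-major) order reshape of `x` to `shape`: reverse the dimension order, reshape in
+ # the default C order, then reverse the dimensions back.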
+ if len(x.shape) > 0:
+ x = x.permute(*reversed(range(len(x.shape))))
+ return x.reshape(*reversed(shape)).permute(*reversed(range(len(shape))))
+
+ def relative_position_embedding_rotate(self, scores):
+ # NOTE: should re-evaluate whether this re-implementation was truly necessary, or whether the complete
+ # overhaul worked because of some other part of the code. Adding this and the Fortran-style reshape code
+ # seems very undesirable.
+ scores = scores.permute(0, 2, 3, 1) # e.g. [10, 1839, 14, 4]
+
+ batch, hidden_state, seq_len, heads = scores.shape
+
+ # e.g. [10, 1853, 14, 4]
+ scores = torch.cat((scores, torch.zeros((batch, seq_len, seq_len, heads), device=scores.device)), dim=1)
+
+ # e.g. [10, 25942, 1, 4]
+ scores = self.reshape_fortran(scores, [batch, (hidden_state + seq_len) * seq_len, 1, heads])
+
+ # e.g. [10, 25928, 1, 4]
+ scores = scores[:, : (seq_len + hidden_state - 1) * seq_len]
+
+ # e.g. [10, 1852, 14, 4]
+ scores = self.reshape_fortran(scores, [batch, hidden_state + seq_len - 1, seq_len, heads])
+
+ halfpoint = hidden_state // 2
+ scores = scores[:, halfpoint : halfpoint + seq_len].transpose(1, 2) # e.g. [10, 14, 14, 4]
+
+ return scores.permute(0, 3, 1, 2)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ mixed_query_layer = self.query(hidden_states)
+ mixed_query_layer = mixed_query_layer / math.sqrt(self.attention_head_size)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ # relative key position embeddings
+ positional_embedding = self.distance_embedding.weight
+ relative_position_scores = torch.einsum("lh, bche -> bcle", positional_embedding, query_layer.transpose(2, 3))
+
+ relative_position_scores = self.relative_position_embedding_rotate(relative_position_scores)
+ attention_scores = attention_scores + relative_position_scores
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the MCTCTModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).flatten(start_dim=-2)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+class MCTCTLayerNorm(nn.Module):
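+ """Minimal "layer norm" used by M-CTC-T: a single learnable scalar weight and bias applied elementwise, with no mean/variance normalization."""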
+ def __init__(self):
+ super().__init__()
+ self.singleton_weight = nn.Parameter(torch.ones(1))
+ self.singleton_bias = nn.Parameter(torch.zeros(1))
+
+ def forward(self, hidden_states):
+ return (hidden_states * self.singleton_weight) + self.singleton_bias
+
+
+class MCTCTSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class MCTCTAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = MCTCTSelfAttention(config)
+ self.output = MCTCTSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+
+class MCTCTIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class MCTCTOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class MCTCTLayer(nn.Module):
+ def __init__(self, config: MCTCTConfig):
+ super().__init__()
+
+ self.seq_len_dim = 1
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+
+ self.intermediate = MCTCTIntermediate(config)
+ self.attention = MCTCTAttention(config)
+ self.is_decoder = config.is_decoder
+ self.output = MCTCTOutput(config)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ self_attention_outputs = self.attention(
+ hidden_states, attention_mask, head_mask, output_attentions=output_attentions
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class MCTCTPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MCTCTConfig
+ base_model_prefix = "mctct"
+ main_input_name = "input_features"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, MCTCTLayerNorm):
+ module.singleton_weight.data.fill_(1.0)
+ module.singleton_bias.data.zero_()
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+ def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
+ """
+ Computes the output length of the convolutional layers
+ """
+ dilation = 1
+ for _, kernel_sz, stride in zip(
+ range(self.config.num_conv_layers), self.config.conv_kernel, self.config.conv_stride
+ ):
+ padding = kernel_sz // 2
+ input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1
+ input_lengths = torch.div(input_lengths, stride, rounding_mode="trunc") + 1
+
+ return input_lengths
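+
+ # Worked example (kernel size and stride are illustrative, not taken from any checkpoint): for a
+ # single conv layer with kernel_sz=7 and stride=3, padding is 7 // 2 = 3, so an input length of
+ # 1000 becomes floor((1000 + 2 * 3 - 1 * (7 - 1) - 1) / 3) + 1 = floor(999 / 3) + 1 = 334.
+ # This is just the standard Conv1d output-length formula applied once per configured layer.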
+
+ def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
+ # generate() creates a 3D attention mask because of the shape of input_features;
+ # convert it to 2D if that's the case
+ if len(attention_mask.shape) > 2:
+ attention_mask = attention_mask[:, :, -1]
+
+ # subsampled_lengths = attention_mask.sum(-1)
+ subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
+ bsz = attention_mask.size()[0]
+ attention_mask = torch.zeros(
+ (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
+ )
+
+ # these two operations make sure that all values
+ # before the output length indices are attended to
+ attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
+ return attention_mask
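+
+ # The flip/cumsum/flip above turns a single 1 placed at the last valid (subsampled) index into a
+ # mask of 1s up to and including that index. A small illustration (toy sizes, assumption only):
+ #
+ #   >>> import torch
+ #   >>> m = torch.zeros(1, 6, dtype=torch.long)
+ #   >>> m[0, 3 - 1] = 1  # mark the last valid position for an output length of 3
+ #   >>> m.flip([-1]).cumsum(-1).flip([-1]).tolist()
+ #   [[1, 1, 1, 0, 0, 0]]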
+
+
+MCTCT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`MCTCTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MCTCT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_features (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`Wav2Vec2CTCTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class MCTCTEncoder(MCTCTPreTrainedModel):
+ def __init__(self, config: MCTCTConfig):
+ super().__init__(config)
+ self.hidden_dropout_prob = config.hidden_dropout_prob
+
+ self.layer_norm = MCTCTLayerNorm()
+ self.conv = MCTCTConv1dSubsampler(config)
+ self.layers = nn.ModuleList([MCTCTLayer(config) for _ in range(config.num_hidden_layers)])
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ input_features: torch.Tensor,
+ attention_mask: torch.Tensor,
+ head_mask: torch.Tensor,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ input_features = self.layer_norm(input_features)
+
+ inputs_embeds = self.conv(input_features)
+
+ # subsample attention mask if necessary
+ if attention_mask is not None:
+ attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
+
+ hidden_states = nn.functional.dropout(inputs_embeds, p=self.hidden_dropout_prob, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, "
+ f"but it is for {head_mask.size()[0]}."
+ )
+
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+@add_start_docstrings(
+ "The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.",
+ MCTCT_START_DOCSTRING,
+)
+class MCTCTModel(MCTCTPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.encoder = MCTCTEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ input_features: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_features is None:
+ raise ValueError("You have to specify input_features.")
+
+ encoder_outputs = self.encoder(
+ input_features,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
+ MCTCT_START_DOCSTRING,
+)
+class MCTCTForCTC(MCTCTPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.mctct = MCTCTModel(config)
+
+ if config.vocab_size is None:
+ raise ValueError(
+ f"You are trying to instantiate {self.__class__} with a configuration that "
+ "does not define the vocabulary size of the language model head. Please "
+ "instantiate the model as follows: `MCTCTForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
+ "or define `vocab_size` of your model's configuration."
+ )
+ output_hidden_size = config.hidden_size
+
+ self.ctc_head = nn.Linear(output_hidden_size, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_CTC_EXPECTED_OUTPUT,
+ expected_loss=_CTC_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_features: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, CausalLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
+ config.vocab_size - 1]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ outputs = self.mctct(
+ input_features,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+
+ logits = self.ctc_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ if labels.max() >= self.config.vocab_size:
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
+
+ # retrieve loss input_lengths from attention_mask
+ attention_mask = (
+ attention_mask
+ if attention_mask is not None
+ else torch.ones(input_features.shape[:-1], dtype=torch.long)
+ )
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
+ # assuming that padded tokens are filled with -100
+ # when not being attended to
+ labels_mask = labels >= 0
+ target_lengths = labels_mask.sum(-1)
+ flattened_targets = labels.masked_select(labels_mask)
+
+ # ctc_loss doesn't support fp16
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
+
+ with torch.backends.cudnn.flags(enabled=False):
+ loss = nn.functional.ctc_loss(
+ log_probs,
+ flattened_targets,
+ input_lengths,
+ target_lengths,
+ blank=self.config.pad_token_id,
+ reduction=self.config.ctc_loss_reduction,
+ zero_infinity=self.config.ctc_zero_infinity,
+ )
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
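+
+
+ # A minimal sketch of the CTC loss inputs assembled in MCTCTForCTC.forward above (shapes, the
+ # blank index and the label values are illustrative assumptions, not tied to any checkpoint):
+ #
+ #   >>> import torch
+ #   >>> from torch import nn
+ #   >>> logits = torch.randn(2, 50, 32)  # (batch, time, vocab)
+ #   >>> labels = torch.tensor([[5, 7, 9, -100], [3, 4, -100, -100]])  # -100 marks padding
+ #   >>> labels_mask = labels >= 0
+ #   >>> loss = nn.functional.ctc_loss(
+ #   ...     nn.functional.log_softmax(logits, dim=-1).transpose(0, 1),  # (time, batch, vocab)
+ #   ...     labels.masked_select(labels_mask),  # flattened targets
+ #   ...     input_lengths=torch.tensor([50, 50]),
+ #   ...     target_lengths=labels_mask.sum(-1),
+ #   ...     blank=0,  # the model uses config.pad_token_id here
+ #   ... )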
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e0cbe27dd9be0244d63a23256808cc421fa1fa5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py
@@ -0,0 +1,142 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Speech processor class for M-CTC-T
+"""
+import warnings
+from contextlib import contextmanager
+
+from ....processing_utils import ProcessorMixin
+
+
+class MCTCTProcessor(ProcessorMixin):
+ r"""
+ Constructs an MCTCT processor which wraps an MCTCT feature extractor and an MCTCT tokenizer into a single processor.
+
+ [`MCTCTProcessor`] offers all the functionalities of [`MCTCTFeatureExtractor`] and [`AutoTokenizer`]. See the
+ [`~MCTCTProcessor.__call__`] and [`~MCTCTProcessor.decode`] for more information.
+
+ Args:
+ feature_extractor (`MCTCTFeatureExtractor`):
+ An instance of [`MCTCTFeatureExtractor`]. The feature extractor is a required input.
+ tokenizer (`AutoTokenizer`):
+ An instance of [`AutoTokenizer`]. The tokenizer is a required input.
+ """
+
+ feature_extractor_class = "MCTCTFeatureExtractor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+ self.current_processor = self.feature_extractor
+ self._in_target_context_manager = False
+
+ def __call__(self, *args, **kwargs):
+ """
+ When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
+ [`~MCTCTFeatureExtractor.__call__`] and returns its output. If used in the context manager
+ [`~MCTCTProcessor.as_target_processor`], this method forwards all its arguments to AutoTokenizer's
+ [`~AutoTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
+ """
+ # For backward compatibility
+ if self._in_target_context_manager:
+ return self.current_processor(*args, **kwargs)
+
+ if "raw_speech" in kwargs:
+ warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
+ audio = kwargs.pop("raw_speech")
+ else:
+ audio = kwargs.pop("audio", None)
+ sampling_rate = kwargs.pop("sampling_rate", None)
+ text = kwargs.pop("text", None)
+ if len(args) > 0:
+ audio = args[0]
+ args = args[1:]
+
+ if audio is None and text is None:
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
+
+ if audio is not None:
+ inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
+ if text is not None:
+ encodings = self.tokenizer(text, **kwargs)
+
+ if text is None:
+ return inputs
+ elif audio is None:
+ return encodings
+ else:
+ inputs["labels"] = encodings["input_ids"]
+ return inputs
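+
+ # A hedged usage sketch of the call above (the checkpoint id, array and sampling rate are
+ # placeholders for illustration only):
+ #
+ #   >>> # processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
+ #   >>> # batch = processor(audio=raw_speech, sampling_rate=16000, text="a transcription")
+ #   >>> # batch["input_features"], batch["labels"]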
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def pad(self, *args, **kwargs):
+ """
+ When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
+ [`~MCTCTFeatureExtractor.pad`] and returns its output. If used in the context manager
+ [`~MCTCTProcessor.as_target_processor`], this method forwards all its arguments to PreTrainedTokenizer's
+ [`~PreTrainedTokenizer.pad`]. Please refer to the docstring of the above two methods for more information.
+ """
+ # For backward compatibility
+ if self._in_target_context_manager:
+ return self.current_processor.pad(*args, **kwargs)
+
+ input_features = kwargs.pop("input_features", None)
+ labels = kwargs.pop("labels", None)
+ if len(args) > 0:
+ input_features = args[0]
+ args = args[1:]
+
+ if input_features is not None:
+ input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
+ if labels is not None:
+ labels = self.tokenizer.pad(labels, **kwargs)
+
+ if labels is None:
+ return input_features
+ elif input_features is None:
+ return labels
+ else:
+ input_features["labels"] = labels["input_ids"]
+ return input_features
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
+ docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @contextmanager
+ def as_target_processor(self):
+ """
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning MCTCT.
+ """
+ warnings.warn(
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
+ "your audio inputs, or in a separate call."
+ )
+ self._in_target_context_manager = True
+ self.current_processor = self.tokenizer
+ yield
+ self.current_processor = self.feature_extractor
+ self._in_target_context_manager = False
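+
+
+ # The deprecation warning above points at the replacement pattern; a hedged sketch of both forms
+ # (`processor` and `transcription` are placeholder names):
+ #
+ #   >>> # deprecated:
+ #   >>> # with processor.as_target_processor():
+ #   >>> #     labels = processor(transcription).input_ids
+ #   >>> # recommended:
+ #   >>> # labels = processor(text=transcription).input_ids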
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7767986be813d839e9c6ae5fe400da7e627596a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e59ed4706204894516b966975dbbb88d462ab29
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__init__.py
@@ -0,0 +1,112 @@
+# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_torch_available,
+)
+
+
+_import_structure = {"configuration_gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig", "GPTJOnnxConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_gptj"] = [
+ "GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "GPTJForCausalLM",
+ "GPTJForQuestionAnswering",
+ "GPTJForSequenceClassification",
+ "GPTJModel",
+ "GPTJPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_gptj"] = [
+ "TFGPTJForCausalLM",
+ "TFGPTJForQuestionAnswering",
+ "TFGPTJForSequenceClassification",
+ "TFGPTJModel",
+ "TFGPTJPreTrainedModel",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_gptj"] = [
+ "FlaxGPTJForCausalLM",
+ "FlaxGPTJModel",
+ "FlaxGPTJPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig, GPTJOnnxConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_gptj import (
+ GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST,
+ GPTJForCausalLM,
+ GPTJForQuestionAnswering,
+ GPTJForSequenceClassification,
+ GPTJModel,
+ GPTJPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_gptj import (
+ TFGPTJForCausalLM,
+ TFGPTJForQuestionAnswering,
+ TFGPTJForSequenceClassification,
+ TFGPTJModel,
+ TFGPTJPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a54005c6259709d5b1aaedba1691caa71239d4f6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..006a3026c7114678823e6862333364c4d267b286
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50b51d3771867abaaa6c06cc936f7a8a8ba026ec
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2533bdf7b8e13059e01f23faf985ba3b02b38069
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..678f339950fa888069a5903bf8b7cfaa03b29919
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py
new file mode 100644
index 0000000000000000000000000000000000000000..47b122427932135d16029152c5daaf9cf620c17e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py
@@ -0,0 +1,220 @@
+# coding=utf-8
+# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" GPT-J model configuration"""
+from collections import OrderedDict
+from typing import Any, List, Mapping, Optional
+
+from ... import PreTrainedTokenizer, TensorType, is_torch_available
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfigWithPast, PatchingSpec
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
+ # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
+}
+
+
+class GPTJConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the GPT-J
+ [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from
+ [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
+ for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50400):
+ Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`GPTJModel`].
+ n_positions (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ n_embd (`int`, *optional*, defaults to 4096):
+ Dimensionality of the embeddings and hidden states.
+ n_layer (`int`, *optional*, defaults to 28):
+ Number of hidden layers in the Transformer encoder.
+ n_head (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ rotary_dim (`int`, *optional*, defaults to 64):
+ Number of dimensions in the embedding that Rotary Position Embedding is applied to.
+ n_inner (`int`, *optional*, defaults to None):
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
+ The dropout ratio for the embeddings.
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention.
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+ The epsilon to use in the layer normalization layers.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+
+ Example:
+
+ ```python
+ >>> from transformers import GPTJModel, GPTJConfig
+
+ >>> # Initializing a GPT-J 6B configuration
+ >>> configuration = GPTJConfig()
+
+ >>> # Initializing a model from the configuration
+ >>> model = GPTJModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "gptj"
+ attribute_map = {
+ "max_position_embeddings": "n_positions",
+ "hidden_size": "n_embd",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=50400,
+ n_positions=2048,
+ n_embd=4096,
+ n_layer=28,
+ n_head=16,
+ rotary_dim=64,
+ n_inner=None,
+ activation_function="gelu_new",
+ resid_pdrop=0.0,
+ embd_pdrop=0.0,
+ attn_pdrop=0.0,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ use_cache=True,
+ bos_token_id=50256,
+ eos_token_id=50256,
+ tie_word_embeddings=False,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.n_positions = n_positions
+ self.n_embd = n_embd
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.n_inner = n_inner
+ self.rotary_dim = rotary_dim
+ self.activation_function = activation_function
+ self.resid_pdrop = resid_pdrop
+ self.embd_pdrop = embd_pdrop
+ self.attn_pdrop = attn_pdrop
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.initializer_range = initializer_range
+ self.use_cache = use_cache
+
+ self.bos_token_id = bos_token_id
+ self.eos_token_id = eos_token_id
+
+ super().__init__(
+ bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
+ )
+
+
+# Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
+class GPTJOnnxConfig(OnnxConfigWithPast):
+ def __init__(
+ self,
+ config: PretrainedConfig,
+ task: str = "default",
+ patching_specs: List[PatchingSpec] = None,
+ use_past: bool = False,
+ ):
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
+ if not getattr(self._config, "pad_token_id", None):
+ # TODO: how to do that better?
+ self._config.pad_token_id = 0
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
+ else:
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
+
+ return common_inputs
+
+ @property
+ def num_layers(self) -> int:
+ return self._config.n_layer
+
+ @property
+ def num_attention_heads(self) -> int:
+ return self._config.n_head
+
+ def generate_dummy_inputs(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+ # We need to order the inputs in the way they appear in forward()
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
+
+ # Need to add the past_keys
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+
+ batch, seqlen = common_inputs["input_ids"].shape
+ # Not using the same length for past_key_values
+ past_key_values_length = seqlen + 2
+ past_shape = (
+ batch,
+ self.num_attention_heads,
+ past_key_values_length,
+ self._config.hidden_size // self.num_attention_heads,
+ )
+ ordered_inputs["past_key_values"] = [
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
+ ]
+
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
+ if self.use_past:
+ mask_dtype = ordered_inputs["attention_mask"].dtype
+ ordered_inputs["attention_mask"] = torch.cat(
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
+ )
+
+ return ordered_inputs
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 13
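+
+
+ # A hedged sketch of inspecting the ONNX config above (the tiny GPTJConfig values are arbitrary
+ # illustration, not a real checkpoint configuration):
+ #
+ #   >>> config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)
+ #   >>> onnx_config = GPTJOnnxConfig(config, use_past=True)
+ #   >>> dict(onnx_config.inputs)["attention_mask"]
+ #   {0: 'batch', 1: 'past_sequence + sequence'}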
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f0d4d6e86000384544fa2873690b09d34a050a2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py
@@ -0,0 +1,718 @@
+# coding=utf-8
+# Copyright 2021 The EleutherAI and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from functools import partial
+from typing import Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+
+from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
+from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_gptj import GPTJConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "gptj"
+_CONFIG_FOR_DOC = "GPTJConfig"
+
+
+GPTJ_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a Flax Linen
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
+GPTJ_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
+ `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def create_sinusoidal_positions(num_pos, dim):
+ inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
+ sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
+ sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
+
+ sentinel = dim // 2 + dim % 2
+ out = np.zeros((num_pos, dim))
+ out[:, 0:sentinel] = sin
+ out[:, sentinel:] = cos
+
+ return jnp.array(out)
+
+
+def rotate_every_two(tensor):
+ rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
+ rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
+ return rotate_half_tensor
+
+
+def apply_rotary_pos_emb(tensor, sincos):
+ sin_pos, cos_pos = sincos
+ sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
+ cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
+ return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
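+
+
+ # For a feature vector [x0, x1, x2, x3], rotate_every_two above returns [-x1, x0, -x3, x2], i.e. each
+ # (even, odd) pair is rotated by 90 degrees before apply_rotary_pos_emb mixes in the sin/cos tables.
+ # A small check (the shape is an illustrative assumption):
+ #
+ #   >>> import jax.numpy as jnp
+ #   >>> t = jnp.arange(4.0).reshape(1, 1, 1, 4)  # [x0, x1, x2, x3] = [0, 1, 2, 3]
+ #   >>> rotate_every_two(t)[0, 0, 0].tolist()
+ #   [-1.0, 0.0, -3.0, 2.0]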
+
+
+class FlaxGPTJAttention(nn.Module):
+ config: GPTJConfig
+ dtype: jnp.dtype = jnp.float32
+ causal: bool = True
+ is_cross_attention: bool = False
+
+ def setup(self):
+ config = self.config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+
+ self.rotary_dim = config.rotary_dim
+
+ dense = partial(
+ nn.Dense,
+ self.embed_dim,
+ use_bias=False,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
+ self.out_proj = dense()
+
+ self.resid_dropout = nn.Dropout(rate=config.resid_pdrop)
+
+ self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
+
+ pos_embd_dim = self.rotary_dim or self.embed_dim
+ self.embed_positions = create_sinusoidal_positions(config.max_position_embeddings, pos_embd_dim)
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
+
+ @nn.compact
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+ states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key
+ # positions that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
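+
+ # The cache update above is `lax.dynamic_update_slice` writing the current step's key/value slice at
+ # `cur_index`; a minimal standalone illustration of that primitive (toy shapes, no model state):
+ #
+ #   >>> import jax.numpy as jnp
+ #   >>> from jax import lax
+ #   >>> cache = jnp.zeros((1, 4, 2))  # (batch, max_length, head_dim)
+ #   >>> step = jnp.ones((1, 1, 2))  # the freshly projected key/value for one position
+ #   >>> lax.dynamic_update_slice(cache, step, (0, 2, 0))[0, :, 0].tolist()
+ #   [0.0, 0.0, 1.0, 0.0]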
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ ):
+ query = self.q_proj(hidden_states)
+ key = self.k_proj(hidden_states)
+ value = self.v_proj(hidden_states)
+
+ query = self._split_heads(query)
+ key = self._split_heads(key)
+ value = self._split_heads(value)
+
+ sincos = jnp.take(self.embed_positions, position_ids, axis=0)
+ sincos = jnp.split(sincos, 2, axis=-1)
+ if self.rotary_dim is not None:
+ k_rot = key[:, :, :, : self.rotary_dim]
+ k_pass = key[:, :, :, self.rotary_dim :]
+
+ q_rot = query[:, :, :, : self.rotary_dim]
+ q_pass = query[:, :, :, self.rotary_dim :]
+
+ k_rot = apply_rotary_pos_emb(k_rot, sincos)
+ q_rot = apply_rotary_pos_emb(q_rot, sincos)
+
+ key = jnp.concatenate([k_rot, k_pass], axis=-1)
+ query = jnp.concatenate([q_rot, q_pass], axis=-1)
+ else:
+ key = apply_rotary_pos_emb(key, sincos)
+ query = apply_rotary_pos_emb(query, sincos)
+
+ query_length, key_length = query.shape[1], key.shape[1]
+
+ if self.has_variable("cache", "cached_key"):
+ mask_shift = self.variables["cache"]["cache_index"]
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_mask = lax.dynamic_slice(
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
+ )
+ else:
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
+
+ batch_size = hidden_states.shape[0]
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
+
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
+ attention_mask = combine_masks(attention_mask, causal_mask)
+
+ dropout_rng = None
+ if not deterministic and self.config.attn_pdrop > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.has_variable("cache", "cached_key") or init_cache:
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
+
+ # transform boolean mask into float mask
+ attention_bias = lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
+ )
+
+ # usual dot product attention
+ attn_weights = dot_product_attention_weights(
+ query,
+ key,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.config.attn_pdrop,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = self.out_proj(attn_output)
+ attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
+
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
+ return outputs
+
+
+class FlaxGPTJMLP(nn.Module):
+ config: GPTJConfig
+ intermediate_size: int
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ embed_dim = self.config.hidden_size
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
+
+ self.fc_in = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init)
+ self.fc_out = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init)
+
+ self.act = ACT2FN[self.config.activation_function]
+ self.dropout = nn.Dropout(rate=self.config.resid_pdrop)
+
+ def __call__(self, hidden_states, deterministic: bool = True):
+ hidden_states = self.fc_in(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.fc_out(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ return hidden_states
+
+
+class FlaxGPTJBlock(nn.Module):
+ config: GPTJConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ hidden_size = self.config.hidden_size
+ inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size
+
+ self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
+ self.attn = FlaxGPTJAttention(self.config, dtype=self.dtype)
+
+ self.mlp = FlaxGPTJMLP(self.config, inner_dim, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_ids=None,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ ):
+ residual = hidden_states
+ hidden_states = self.ln_1(hidden_states)
+ attn_outputs = self.attn(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ )
+ attn_output = attn_outputs[0]
+
+ feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic)
+ # residual connection
+ hidden_states = attn_output + feed_forward_hidden_states + residual
+
+ return (hidden_states,) + attn_outputs[1:]
+
+
+class FlaxGPTJPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GPTJConfig
+ base_model_prefix = "transformer"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: GPTJConfig,
+ input_shape: Tuple = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ attention_mask = jnp.ones_like(input_ids)
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ if self.config.add_cross_attention:
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,))
+ encoder_attention_mask = attention_mask
+ module_init_outputs = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ position_ids,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ return_dict=False,
+ )
+ else:
+ module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
+
+ random_params = module_init_outputs["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ def init_cache(self, batch_size, max_length):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+ """
+ # init input variables to retrieve cache
+ input_ids = jnp.ones((batch_size, max_length))
+ attention_mask = jnp.ones_like(input_ids)
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
+ )
+ return init_variables["cache"]
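+
+ # A hedged usage sketch (the checkpoint id and prompt handling are placeholders): the dict returned
+ # by init_cache is what gets passed back in as `past_key_values` for incremental decoding.
+ #
+ #   >>> # model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
+ #   >>> # past_key_values = model.init_cache(batch_size=1, max_length=128)
+ #   >>> # outputs = model(input_ids, attention_mask=attention_mask,
+ #   >>> #                 position_ids=position_ids, past_key_values=past_key_values)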
+
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ position_ids=None,
+ params: dict = None,
+ past_key_values: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ batch_size, sequence_length = input_ids.shape
+
+ if position_ids is None:
+ if past_key_values is not None:
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
+
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ if attention_mask is None:
+ attention_mask = jnp.ones((batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If `past_key_values` are passed, the cache is already initialized; a private flag, `init_cache`,
+ # has to be passed down to ensure the cache is used, and the cache has to be marked as mutable so
+ # that it can be updated by the FlaxGPTJAttention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ outputs = self.module.apply(
+ inputs,
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ jnp.array(position_ids, dtype="i4"),
+ not train,
+ False,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ rngs=rngs,
+ mutable=mutable,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past_key_values = outputs
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past_key_values = outputs
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
+
+ return outputs
+
+
+class FlaxGPTJBlockCollection(nn.Module):
+ config: GPTJConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.blocks = [
+ FlaxGPTJBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_ids=None,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ for block in self.blocks:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = block(
+ hidden_states,
+ attention_mask,
+ position_ids=position_ids,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions += (layer_outputs[1],)
+
+ # this contains possible `None` values - `FlaxGPTJModule` will filter them out
+ outputs = (hidden_states, all_hidden_states, all_attentions)
+
+ return outputs
+
+
+class FlaxGPTJModule(nn.Module):
+ config: GPTJConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.embed_dim = self.config.hidden_size
+
+ self.wte = nn.Embed(
+ self.config.vocab_size,
+ self.config.hidden_size,
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+ self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
+ self.h = FlaxGPTJBlockCollection(self.config, dtype=self.dtype)
+ self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ deterministic=True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ input_embeds = self.wte(input_ids.astype("i4"))
+
+ hidden_states = self.dropout(input_embeds, deterministic=deterministic)
+
+ outputs = self.h(
+ hidden_states,
+ attention_mask,
+ position_ids=position_ids,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.ln_f(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = outputs[1] + (hidden_states,)
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
+ else:
+ outputs = (hidden_states,) + outputs[1:]
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=outputs[1],
+ attentions=outputs[-1],
+ )
+
+
+@add_start_docstrings(
+ "The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top.",
+ GPTJ_START_DOCSTRING,
+)
+class FlaxGPTJModel(FlaxGPTJPreTrainedModel):
+ module_class = FlaxGPTJModule
+
+
+append_call_sample_docstring(
+ FlaxGPTJModel,
+ _CHECKPOINT_FOR_DOC,
+ FlaxCausalLMOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+class FlaxGPTJForCausalLMModule(nn.Module):
+ config: GPTJConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.transformer = FlaxGPTJModule(self.config, dtype=self.dtype)
+ self.lm_head = nn.Dense(
+ self.config.vocab_size,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ outputs = self.transformer(
+ input_ids,
+ attention_mask,
+ position_ids,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+
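+ # When word embeddings are tied, the transposed `wte` embedding matrix is reused as the lm_head kernel.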
+ if self.config.tie_word_embeddings:
+ shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
+ else:
+ lm_logits = self.lm_head(hidden_states)
+
+ if not return_dict:
+ return (lm_logits,) + outputs[1:]
+
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
+
+
+@add_start_docstrings(
+ """
+ The GPTJ Model transformer with a language modeling head on top.
+ """,
+ GPTJ_START_DOCSTRING,
+)
+class FlaxGPTJForCausalLM(FlaxGPTJPreTrainedModel):
+ module_class = FlaxGPTJForCausalLMModule
+
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
+ # initializing the cache
+ batch_size, seq_length = input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length)
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+ # But since GPTJ uses a causal mask, those positions are masked anyway.
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation.
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if attention_mask is not None:
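+ # Deriving position ids from the cumulative sum of the mask keeps left-padded tokens from shifting
+ # the positions of the real tokens.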
+ position_ids = attention_mask.cumsum(axis=-1) - 1
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
+ else:
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+ return {
+ "past_key_values": past_key_values,
+ "attention_mask": extended_attention_mask,
+ "position_ids": position_ids,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
+ return model_kwargs
+
+
+append_call_sample_docstring(
+ FlaxGPTJForCausalLM,
+ _CHECKPOINT_FOR_DOC,
+ FlaxCausalLMOutput,
+ _CONFIG_FOR_DOC,
+)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py
new file mode 100644
index 0000000000000000000000000000000000000000..144dbba05527452d5b5f323a4d17f659655f42b7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py
@@ -0,0 +1,1430 @@
+# coding=utf-8
+# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch GPT-J model."""
+
+import warnings
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.fx
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutputWithPast,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ is_torch_fx_proxy,
+ logging,
+)
+from ...utils.model_parallel_utils import assert_device_map, get_device_map
+from .configuration_gptj import GPTJConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj"
+_REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
+_CONFIG_FOR_DOC = "GPTJConfig"
+
+
+GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "EleutherAI/gpt-j-6B",
+ # See all GPT-J models at https://huggingface.co/models?filter=gptj
+]
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
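+ # For a (batch_size, seq_len) padding mask this returns: the flat indices of the non-padding tokens, the
+ # cumulative sequence lengths expected by `flash_attn_varlen_func`, and the longest unpadded length in the batch.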
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
+
+
+def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
+ sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
+ return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
+
+
+@torch.fx.wrap
+def get_embed_positions(embed_positions, position_ids):
+ return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1)
+
+
+def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
+ x1 = x[:, :, :, ::2]
+ x2 = x[:, :, :, 1::2]
+ x = torch.stack((-x2, x1), dim=-1)
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
+
+
+def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
+ sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
+ cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
+ return (tensor * cos) + (rotate_every_two(tensor) * sin)
+
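+ # `rotate_every_two` pairs adjacent features: for a last dimension [x1, x2, x3, x4] it returns
+ # [-x2, x1, -x4, x3]. Together with the interleaved sin/cos in `apply_rotary_pos_emb`, each pair is rotated
+ # by the angle of its position (the interleaved, GPT-J-style rotary position embedding).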
+
+class GPTJAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ max_positions = config.max_position_embeddings
+ self.register_buffer(
+ "bias",
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
+ 1, 1, max_positions, max_positions
+ ),
+ persistent=False,
+ )
+ self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
+
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
+
+ self.is_causal = True
+
+ self.embed_dim = config.hidden_size
+ self.num_attention_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_attention_heads
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
+ f" `num_attention_heads`: {self.num_attention_heads})."
+ )
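+ # Attention scores are divided by this (sqrt(head_dim)) in `_attn`, after the causal mask is applied.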
+ self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+ self.rotary_dim = config.rotary_dim
+ pos_embd_dim = self.rotary_dim or self.embed_dim
+ self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
+
+ def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
+ """
+ Splits hidden dim into attn_head_size and num_attention_heads
+ """
+ new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
+ tensor = tensor.view(new_shape)
+ if rotary:
+ return tensor
+ if len(tensor.shape) == 5:
+ return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features)
+ elif len(tensor.shape) == 4:
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
+ else:
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
+
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
+ """
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
+ """
+ if len(tensor.shape) == 5:
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
+ elif len(tensor.shape) == 4:
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
+ else:
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
+ return tensor.view(new_shape)
+
+ def _attn(
+ self,
+ query,
+ key,
+ value,
+ attention_mask=None,
+ head_mask=None,
+ ):
+ # compute causal mask from causal mask buffer
+ query_length, key_length = query.size(-2), key.size(-2)
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
+
+ # Keep the attention weights computation in fp32 to avoid overflow issues
+ query = query.to(torch.float32)
+ key = key.to(torch.float32)
+
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+ mask_value = torch.finfo(attn_weights.dtype).min
+ # Needs to be a tensor, otherwise we get the error: `RuntimeError: expected scalar type float but found double`.
+ # Needs to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+ attn_weights = attn_weights / self.scale_attn
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ attn_weights = attn_weights + attention_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+ attn_weights = attn_weights.to(value.dtype)
+ attn_weights = self.attn_dropout(attn_weights)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_weights = attn_weights * head_mask
+
+ attn_output = torch.matmul(attn_weights, value)
+
+ return attn_output, attn_weights
+
+ def _get_embed_positions(self, position_ids):
+ embed_positions = self.embed_positions
+ if embed_positions.device != position_ids.device:
+ embed_positions = embed_positions.to(position_ids.device)
+ self.embed_positions = embed_positions
+ return embed_positions.repeat(position_ids.shape[0], 1, 1)
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> Union[
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
+ ]:
+ query = self.q_proj(hidden_states)
+ key = self.k_proj(hidden_states)
+ value = self.v_proj(hidden_states)
+
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
+
+ if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
+ # The logic to conditionally copy to GPU could not be traced, so we do this
+ # every time in the torch.fx case
+ embed_positions = get_embed_positions(self.embed_positions, position_ids)
+ else:
+ embed_positions = self._get_embed_positions(position_ids)
+
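+ # Gather the sin/cos row for each position id; `embed_positions` is (batch, max_positions, pos_embd_dim)
+ # after the repeat above, and the gathered `sincos` is split into its sin and cos halves below.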
+ repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
+ sincos = torch.gather(embed_positions, 1, repeated_position_ids)
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
+
+ if self.rotary_dim is not None:
+ k_rot = key[:, :, :, : self.rotary_dim]
+ k_pass = key[:, :, :, self.rotary_dim :]
+
+ q_rot = query[:, :, :, : self.rotary_dim]
+ q_pass = query[:, :, :, self.rotary_dim :]
+
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
+
+ key = torch.cat([k_rot, k_pass], dim=-1)
+ query = torch.cat([q_rot, q_pass], dim=-1)
+ else:
+ key = apply_rotary_pos_emb(key, sin, cos)
+ query = apply_rotary_pos_emb(query, sin, cos)
+
+ key = key.permute(0, 2, 1, 3)
+ query = query.permute(0, 2, 1, 3)
+
+ if layer_past is not None:
+ past_key = layer_past[0]
+ past_value = layer_past[1]
+ key = torch.cat((past_key, key), dim=-2)
+ value = torch.cat((past_value, value), dim=-2)
+
+ if use_cache is True:
+ # Note that this cast is quite ugly, but it is not done before RoPE because the original codebase keeps the key in float32 throughout the computation.
+ # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128
+ present = (key.to(hidden_states.dtype), value)
+ else:
+ present = None
+
+ # compute self-attention: V x Softmax(QK^T)
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
+
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
+ attn_output = self.out_proj(attn_output)
+ attn_output = self.resid_dropout(attn_output)
+
+ outputs = (attn_output, present)
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs # a, present, (attentions)
+
+
+class GPTJFlashAttention2(GPTJAttention):
+ """
+ GPTJ flash attention module. This module inherits from `GPTJAttention`, as the weights of the module stay
+ untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
+ flash attention and deal with any padding tokens in the input.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> Union[
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
+ ]:
+ query = self.q_proj(hidden_states)
+ key = self.k_proj(hidden_states)
+ value = self.v_proj(hidden_states)
+
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
+
+ if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
+ # The logic to conditionally copy to GPU could not be traced, so we do this
+ # every time in the torch.fx case
+ embed_positions = get_embed_positions(self.embed_positions, position_ids)
+ else:
+ embed_positions = self._get_embed_positions(position_ids)
+
+ repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
+ sincos = torch.gather(embed_positions, 1, repeated_position_ids)
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
+
+ if self.rotary_dim is not None:
+ k_rot = key[:, :, :, : self.rotary_dim]
+ k_pass = key[:, :, :, self.rotary_dim :]
+
+ q_rot = query[:, :, :, : self.rotary_dim]
+ q_pass = query[:, :, :, self.rotary_dim :]
+
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
+
+ key = torch.cat([k_rot, k_pass], dim=-1)
+ query = torch.cat([q_rot, q_pass], dim=-1)
+ else:
+ key = apply_rotary_pos_emb(key, sin, cos)
+ query = apply_rotary_pos_emb(query, sin, cos)
+
+ # transpose to have the desired shape
+ # before transpose: batch_size x seq_length x num_attention_heads x head_dim
+ # after transpose: batch_size x num_attention_heads x seq_length x head_dim
+ key = key.permute(0, 2, 1, 3)
+ query = query.permute(0, 2, 1, 3)
+ # value: batch_size x num_attention_heads x seq_length x head_dim
+
+ if layer_past is not None:
+ past_key = layer_past[0]
+ past_value = layer_past[1]
+ key = torch.cat((past_key, key), dim=-2)
+ value = torch.cat((past_value, value), dim=-2)
+
+ if use_cache is True:
+ # Note that this cast is quite ugly, but it is not done before RoPE because the original codebase keeps the key in float32 throughout the computation.
+ # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128
+ present = (key.to(hidden_states.dtype), value)
+ else:
+ present = None
+
+ # Flash attention requires the input to have the shape
+ # batch_size x seq_length x num_attention_heads x head_dim,
+ # so we permute query, key and value back to that layout before calling it.
+ key = key.permute(0, 2, 1, 3).contiguous()
+ query = query.permute(0, 2, 1, 3).contiguous()
+ value = value.permute(0, 2, 1, 3).contiguous()
+
+ # In PEFT, the layer norms are usually cast to float32 for training stability,
+ # so the input hidden states get silently cast to float32. Hence, we need to
+ # cast them back to the correct dtype just to be sure everything works as expected.
+ # This might slow down training & inference, so it is recommended not to cast the
+ # LayerNorms to fp32. (LlamaRMSNorm handles it correctly)
+
+ input_dtype = query.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ "The input hidden states seem to have been silently cast to float32. This might be because you have"
+ " upcast embedding or layer norm layers to float32. We will cast the input back to"
+ f" {target_dtype}."
+ )
+
+ query = query.to(target_dtype)
+ key = key.to(target_dtype)
+ value = value.to(target_dtype)
+
+ attention_dropout = self.config.attn_pdrop if self.training else 0.0 # attn_pdrop in gptj
+
+ query_length = query.shape[1]
+
+ # Compute attention
+ attn_weights = self._flash_attention_forward(
+ query,
+ key,
+ value,
+ attention_mask,
+ query_length,
+ dropout=attention_dropout,
+ )
+
+ # Reshape outputs
+ attn_output = attn_weights.reshape(
+ attn_weights.shape[0], attn_weights.shape[1], attn_weights.shape[2] * attn_weights.shape[3]
+ )
+ attn_output = self.out_proj(attn_output)
+ attn_output = self.resid_dropout(attn_output)
+
+ outputs = (attn_output, present)
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+ first unpad the input, then compute the attention scores, and finally pad the attention scores back.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->num_attention_heads
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, which is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+GPTJ_ATTENTION_CLASSES = {
+ "eager": GPTJAttention,
+ "flash_attention_2": GPTJFlashAttention2,
+}
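+ # Which class is used is decided by `config._attn_implementation` (see `GPTJBlock` below), typically set via
+ # the `attn_implementation` argument of `from_pretrained`, e.g.
+ # `GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", attn_implementation="flash_attention_2")`.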
+
+
+class GPTJMLP(nn.Module):
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
+ super().__init__()
+ embed_dim = config.n_embd
+
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
+
+ self.act = ACT2FN[config.activation_function]
+ self.dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
+ hidden_states = self.fc_in(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.fc_out(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class GPTJBlock(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+ self.attn = GPTJ_ATTENTION_CLASSES[config._attn_implementation](config)
+ self.mlp = GPTJMLP(inner_dim, config)
+
+ def forward(
+ self,
+ hidden_states: Optional[torch.FloatTensor],
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
+ residual = hidden_states
+ hidden_states = self.ln_1(hidden_states)
+ attn_outputs = self.attn(
+ hidden_states=hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
+ outputs = attn_outputs[1:]
+
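+ # GPT-J uses a "parallel" residual: the attention and the MLP both read the same layer-normed input and
+ # their outputs are added to the residual together, instead of the sequential layout used in GPT-2.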
+ feed_forward_hidden_states = self.mlp(hidden_states)
+ hidden_states = attn_output + feed_forward_hidden_states + residual
+
+ if use_cache:
+ outputs = (hidden_states,) + outputs
+ else:
+ outputs = (hidden_states,) + outputs[1:]
+
+ return outputs # hidden_states, present, (attentions)
+
+
+class GPTJPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GPTJConfig
+ base_model_prefix = "transformer"
+ is_parallelizable = True
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["GPTJBlock"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+
+ def __init__(self, *inputs, **kwargs):
+ super().__init__(*inputs, **kwargs)
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear,)):
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+GPTJ_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GPTJ_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+PARALLELIZE_DOCSTRING = r"""
+ This is an experimental feature and is subject to change at a moment's notice. Uses a device map to distribute
+ attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks
+ across all devices.
+
+ Args:
+ device_map (`Dict[int, list]`, optional, defaults to None):
+ A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
+ automatically mapped to the first device (for esoteric reasons). That means that the first device should
+ have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the
+ following number of attention modules:
+
+ - gpt-j-6B: 28
+
+ Example:
+
+ ```python
+ # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:
+ model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
+ device_map = {
+ 0: [0, 1, 2, 3, 4, 5, 6],
+ 1: [7, 8, 9, 10, 11, 12, 13],
+ 2: [14, 15, 16, 17, 18, 19, 20],
+ 3: [21, 22, 23, 24, 25, 26, 27],
+ }
+ model.parallelize(device_map)
+ ```
+"""
+
+DEPARALLELIZE_DOCSTRING = r"""
+ Moves the model to CPU from a model parallel state.
+
+ Example:
+
+ ```python
+ # On a 4 GPU machine with gpt-j-6B:
+ model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
+ device_map = {
+ 0: [0, 1, 2, 3, 4, 5, 6],
+ 1: [7, 8, 9, 10, 11, 12, 13],
+ 2: [14, 15, 16, 17, 18, 19, 20],
+ 3: [21, 22, 23, 24, 25, 26, 27],
+ }
+ model.parallelize(device_map) # Splits the model across several devices
+ model.deparallelize()  # Puts the model back on CPU and frees memory by calling torch.cuda.empty_cache()
+ ```
+"""
+
+
+@add_start_docstrings(
+ "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
+ GPTJ_START_DOCSTRING,
+)
+class GPTJModel(GPTJPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.embed_dim = config.n_embd
+ self.vocab_size = config.vocab_size
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
+ self.drop = nn.Dropout(config.embd_pdrop)
+ self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
+ def parallelize(self, device_map=None):
+ warnings.warn(
+ "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
+ " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
+ " ...}",
+ FutureWarning,
+ )
+ # Check validity of device_map
+ self.device_map = (
+ get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
+ )
+ assert_device_map(self.device_map, len(self.h))
+ self.model_parallel = True
+ self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
+ self.last_device = "cuda:" + str(max(self.device_map.keys()))
+ self.wte = self.wte.to(self.first_device)
+ # Load onto devices
+ for k, v in self.device_map.items():
+ for block in v:
+ cuda_device = "cuda:" + str(k)
+ self.h[block] = self.h[block].to(cuda_device)
+ # ln_f to last
+ self.ln_f = self.ln_f.to(self.last_device)
+
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
+ def deparallelize(self):
+ warnings.warn(
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
+ FutureWarning,
+ )
+ self.model_parallel = False
+ self.device_map = None
+ self.first_device = "cpu"
+ self.last_device = "cpu"
+ self.wte = self.wte.to("cpu")
+ for index in range(len(self.h)):
+ self.h[index] = self.h[index].to("cpu")
+ self.ln_f = self.ln_f.to("cpu")
+ torch.cuda.empty_cache()
+
+ def get_input_embeddings(self):
+ return self.wte
+
+ def set_input_embeddings(self, new_embeddings):
+ self.wte = new_embeddings
+
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ batch_size = input_ids.shape[0]
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ batch_size = inputs_embeds.shape[0]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
+
+ if past_key_values is None:
+ past_length = 0
+ past_key_values = tuple([None] * len(self.h))
+ else:
+ past_length = past_key_values[0][0].size(-2)
+
+ if position_ids is None:
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
+ position_ids = position_ids.unsqueeze(0)
+
+ if not self._use_flash_attention_2:
+ # Attention mask.
+ if attention_mask is not None:
+ if batch_size <= 0:
+ raise ValueError("batch_size has to be defined and > 0")
+ attention_mask = attention_mask.view(batch_size, -1)
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is more simple than the triangular masking of causal attention
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+ attention_mask = attention_mask[:, None, None, :]
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and the dtype's smallest value for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x num_attention_heads x N x N
+ # head_mask has shape n_layer x batch x num_attention_heads x N x N
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.wte(input_ids)
+
+ hidden_states = inputs_embeds
+
+ if token_type_ids is not None:
+ token_type_embeds = self.wte(token_type_ids)
+ hidden_states = hidden_states + token_type_embeds
+
+ hidden_states = self.drop(hidden_states)
+
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ presents = () if use_cache else None
+ all_self_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+ # Model parallel
+ if self.model_parallel:
+ torch.cuda.set_device(hidden_states.device)
+ # Ensure layer_past is on the same device as hidden_states (might not be correct)
+ if layer_past is not None:
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
+ # Ensure that attention_mask is always on the same device as hidden_states
+ if attention_mask is not None:
+ attention_mask = attention_mask.to(hidden_states.device)
+ if isinstance(head_mask, torch.Tensor):
+ head_mask = head_mask.to(hidden_states.device)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ outputs = self._gradient_checkpointing_func(
+ block.__call__,
+ hidden_states,
+ None,
+ attention_mask,
+ position_ids,
+ head_mask[i],
+ use_cache,
+ output_attentions,
+ )
+ else:
+ outputs = block(
+ hidden_states=hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask[i],
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = outputs[0]
+ if use_cache is True:
+ presents = presents + (outputs[1],)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+
+ # Model Parallel: If it's the last layer for that device, put things on the next device
+ if self.model_parallel:
+ for k, v in self.device_map.items():
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
+
+ hidden_states = self.ln_f(hidden_states)
+
+ hidden_states = hidden_states.view(output_shape)
+ # Add last hidden state
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
+
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The GPT-J Model transformer with a language modeling head on top.
+ """,
+ GPTJ_START_DOCSTRING,
+)
+class GPTJForCausalLM(GPTJPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = GPTJModel(config)
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
+ def parallelize(self, device_map=None):
+ warnings.warn(
+ "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
+ " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
+ " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
+ " 0, 'transformer.h.1': 1, ...}",
+ FutureWarning,
+ )
+ self.device_map = (
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
+ if device_map is None
+ else device_map
+ )
+ assert_device_map(self.device_map, len(self.transformer.h))
+ self.transformer.parallelize(self.device_map)
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
+ self.model_parallel = True
+
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
+ def deparallelize(self):
+ warnings.warn(
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
+ FutureWarning,
+ )
+ self.transformer.deparallelize()
+ self.transformer = self.transformer.to("cpu")
+ self.lm_head = self.lm_head.to("cpu")
+ self.model_parallel = False
+ torch.cuda.empty_cache()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+ token_type_ids = kwargs.get("token_type_ids", None)
+ # Omit tokens covered by past_key_values
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
+
+ attention_mask = kwargs.get("attention_mask", None)
+ position_ids = kwargs.get("position_ids", None)
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "position_ids": position_ids,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids,
+ }
+ )
+
+ return model_inputs
+
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+
+ # Set device for model parallelism
+ if self.model_parallel:
+ torch.cuda.set_device(self.transformer.first_device)
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
+
+ # make sure sampling in fp16 works correctly and
+ # compute loss in fp32 to match with mesh-tf version
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
+ lm_logits = self.lm_head(hidden_states).to(torch.float32)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(lm_logits.device)
+ # Shift so that tokens < n predict n
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ loss = loss.to(hidden_states.dtype)
+
+ if not return_dict:
+ output = (lm_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ @staticmethod
+ def _reorder_cache(
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
+ ) -> Tuple[Tuple[torch.Tensor]]:
+ """
+ This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
+ [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
+ beam_idx at every generation step.
+ """
+ return tuple(
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
+ for layer_past in past_key_values
+ )
+
+
+@add_start_docstrings(
+ """
+ The GPT-J Model transformer with a sequence classification head on top (linear layer).
+
+ [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT, GPT-2, GPT-Neo) do.
+
+ Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ GPTJ_START_DOCSTRING,
+)
+class GPTJForSequenceClassification(GPTJPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.transformer = GPTJModel(config)
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint="ydshieh/tiny-random-gptj-for-sequence-classification",
+ output_type=SequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+
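+ # Pool by taking, for each row of the batch, the logits at the last non-padding position
+ # (or simply the last position when `sequence_lengths` is -1).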
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(pooled_logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
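
As a side note on the pooling logic above: the sequence-classification head picks the hidden state of the last non-padding token, and the modulo trick keeps that index computation ONNX-exportable even when a row contains no padding at all. Below is a minimal, illustrative sketch of just that index computation; the `pad_token_id` value and token ids are made up for the example.

```python
import torch

# Toy batch: rows 1-2 are right-padded with pad_token_id=0, row 3 has no padding at all.
input_ids = torch.tensor(
    [[5, 6, 7, 0, 0],
     [8, 9, 0, 0, 0],
     [1, 2, 3, 4, 5]]
)
pad_token_id = 0

# Same computation as in the forward pass: index of the first pad token, minus one...
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
# ...and the modulo maps the "no pad found" case (-1) to the last position in the row.
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 1, 4])
```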
+
+@add_start_docstrings(
+ """
+ The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ GPTJ_START_DOCSTRING,
+)
+class GPTJForQuestionAnswering(GPTJPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.transformer = GPTJModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Model parallel
+ self.model_parallel = False
+ self.device_map = None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
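
To make the span-classification head above concrete, here is a minimal usage sketch. The tiny config values are arbitrary (chosen only so the model is cheap to instantiate with random weights) and the start/end positions are dummy labels, not outputs of any real checkpoint.

```python
import torch
from transformers import GPTJConfig, GPTJForQuestionAnswering

# Small random-weight model; these hyperparameters are illustrative only.
config = GPTJConfig(n_embd=128, n_layer=2, n_head=4, rotary_dim=32, vocab_size=1000)
model = GPTJForQuestionAnswering(config)

input_ids = torch.randint(0, config.vocab_size, (1, 12))
outputs = model(
    input_ids,
    start_positions=torch.tensor([3]),  # dummy gold span used only for the loss
    end_positions=torch.tensor([5]),
)
print(outputs.loss, outputs.start_logits.shape)  # scalar loss, start logits of shape (1, 12)
```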
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py
new file mode 100644
index 0000000000000000000000000000000000000000..d948fc63c09ad468a0927b2c45db9f69a3d28370
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py
@@ -0,0 +1,1104 @@
+# coding=utf-8
+# Copyright 2022 The EleutherAI and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 GPT-J model."""
+
+from __future__ import annotations
+
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...file_utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+)
+from ...modeling_tf_outputs import (
+ TFBaseModelOutputWithPast,
+ TFCausalLMOutputWithPast,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutputWithPast,
+)
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFSharedEmbeddings,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import logging
+from .configuration_gptj import GPTJConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
+_CONFIG_FOR_DOC = "GPTJConfig"
+
+GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "EleutherAI/gpt-j-6B",
+ # See all GPT-J models at https://huggingface.co/models?filter=gptj
+]
+
+
+def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor:
+ inv_freq = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32)
+ sinusoid_inp = tf.cast(tf.einsum("i , j -> i j", tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32)
+ sin, cos = tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)
+ out = tf.concat((sin, cos), axis=1)
+ return out
+
+
+def rotate_every_two(x: tf.Tensor) -> tf.Tensor:
+ rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1)
+ new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])]
+ rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape)
+ return rotate_half_tensor
+
+
+def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor:
+ sin_pos, cos_pos = sincos
+ sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3)
+ cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3)
+ return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
+
+
+class TFGPTJAttention(keras.layers.Layer):
+ def __init__(self, config: GPTJConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embed_dim = config.hidden_size
+ self.num_attention_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_attention_heads
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
+ f" `num_attention_heads`: {self.num_attention_heads})."
+ )
+ self.scale_attn = self.head_dim**0.5
+ self.rotary_dim = config.rotary_dim
+
+ self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)
+ self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)
+
+ self.q_proj = keras.layers.Dense(
+ self.embed_dim,
+ use_bias=False,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="q_proj",
+ )
+ self.k_proj = keras.layers.Dense(
+ self.embed_dim,
+ use_bias=False,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="k_proj",
+ )
+ self.v_proj = keras.layers.Dense(
+ self.embed_dim,
+ use_bias=False,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="v_proj",
+ )
+ self.out_proj = keras.layers.Dense(
+ self.embed_dim,
+ use_bias=False,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="out_proj",
+ )
+
+ self.max_positions = config.max_position_embeddings
+ self.lower_triangle_mask = tf.reshape(
+ tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8),
+ (1, 1, self.max_positions, self.max_positions),
+ )
+ pos_embd_dim = self.rotary_dim or self.embed_dim
+ self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim)
+
+ def get_causal_mask(self, key_length, query_length) -> tf.Tensor:
+ return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length : key_length, :key_length], tf.bool)
+
+ @staticmethod
+ def get_masked_bias(dtype: tf.DType) -> tf.Tensor:
+ return tf.cast(tf.constant(-1e9), dtype)
+
+ def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor:
+ """
+ Splits hidden dim into attn_head_size and num_attention_heads
+ """
+ new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim]
+ hidden_states = tf.reshape(hidden_states, new_shape)
+ if rotary:
+ return hidden_states
+ if len(shape_list(hidden_states)) == 4:
+ return tf.transpose(hidden_states, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
+ if len(shape_list(hidden_states)) == 5:
+ return tf.transpose(hidden_states, (0, 1, 3, 2, 4)) # (batch, blocks, head, block_length, head_features)
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
+
+ def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ """
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
+ """
+ if len(shape_list(hidden_states)) == 4:
+ hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3))
+ elif len(shape_list(hidden_states)) == 5:
+ hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4))
+ else:
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
+ new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim]
+ return tf.reshape(hidden_states, new_shape)
+
+ def _attn(
+ self,
+ query: tf.Tensor,
+ key: tf.Tensor,
+ value: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ ) -> Tuple[tf.Tensor, tf.Tensor]:
+ # compute causal mask from causal mask buffer
+ query_length, key_length = shape_list(query)[-2], shape_list(key)[-2]
+ causal_mask = self.get_causal_mask(key_length, query_length)
+
+ # Keep the attention weights computation in fp32 to avoid overflow issues
+ query = tf.cast(query, tf.float32)
+ key = tf.cast(key, tf.float32)
+
+ attn_weights = tf.matmul(query, key, transpose_b=True)
+ attn_weights = tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype))
+
+ attn_weights = attn_weights / self.scale_attn
+
+ if attention_mask is not None:
+ # Apply the attention mask
+ attn_weights = attn_weights + attention_mask
+
+ attn_weights = stable_softmax(attn_weights, axis=-1)
+ attn_weights = tf.cast(attn_weights, value.dtype)
+ attn_weights = self.attn_dropout(attn_weights)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_weights = attn_weights * head_mask
+
+ attn_output = tf.matmul(attn_weights, value)
+
+ return attn_output, attn_weights
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]] = None,
+ attention_mask: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ use_cache: bool = False,
+ output_attentions: bool = False,
+ ):
+ query = self.q_proj(hidden_states)
+ key = self.k_proj(hidden_states)
+ value = self.v_proj(hidden_states)
+
+ query = self._split_heads(query, True)
+ key = self._split_heads(key, True)
+ value = self._split_heads(value, False)
+
+ sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype)
+ sincos = tf.split(sincos, 2, axis=-1)
+ if self.rotary_dim is not None:
+ k_rot = key[:, :, :, : self.rotary_dim]
+ k_pass = key[:, :, :, self.rotary_dim :]
+
+ q_rot = query[:, :, :, : self.rotary_dim]
+ q_pass = query[:, :, :, self.rotary_dim :]
+
+ k_rot = apply_rotary_pos_emb(k_rot, sincos)
+ q_rot = apply_rotary_pos_emb(q_rot, sincos)
+
+ key = tf.concat((k_rot, k_pass), axis=-1)
+ query = tf.concat((q_rot, q_pass), axis=-1)
+ else:
+ key = apply_rotary_pos_emb(key, sincos)
+ query = apply_rotary_pos_emb(query, sincos)
+
+ key = tf.transpose(key, (0, 2, 1, 3))
+ query = tf.transpose(query, (0, 2, 1, 3))
+
+ if layer_past is not None:
+ past_key = layer_past[0]
+ past_value = layer_past[1]
+ key = tf.concat((past_key, key), axis=-2)
+ value = tf.concat((past_value, value), axis=-2)
+
+ if use_cache is True:
+ present = (key, value)
+ else:
+ present = None
+
+ # compute self-attention: V x Softmax(QK^T)
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
+
+ attn_output = self._merge_heads(attn_output)
+ attn_output = self.out_proj(attn_output)
+ attn_output = self.resid_dropout(attn_output)
+
+ outputs = (attn_output, present)
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs # a, present, (attentions)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
+
+class TFGPTJMLP(keras.layers.Layer):
+ def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs):
+ super().__init__(**kwargs)
+ embed_dim = config.n_embd
+
+ self.fc_in = keras.layers.Dense(
+ intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="fc_in"
+ )
+ self.fc_out = keras.layers.Dense(
+ embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="fc_out"
+ )
+
+ self.act = get_tf_activation(config.activation_function)
+ self.dropout = keras.layers.Dropout(config.embd_pdrop)
+ self.embed_dim = config.n_embd
+ self.intermediate_size = intermediate_size
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.fc_in(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.fc_out(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "fc_in", None) is not None:
+ with tf.name_scope(self.fc_in.name):
+ self.fc_in.build([None, None, self.embed_dim])
+ if getattr(self, "fc_out", None) is not None:
+ with tf.name_scope(self.fc_out.name):
+ self.fc_out.build([None, None, self.intermediate_size])
+
+
+class TFGPTJBlock(keras.layers.Layer):
+ def __init__(self, config: GPTJConfig, **kwargs):
+ super().__init__(**kwargs)
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
+ self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
+ self.attn = TFGPTJAttention(config, name="attn")
+ self.mlp = TFGPTJMLP(inner_dim, config, name="mlp")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ layer_past: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ use_cache: bool = False,
+ output_attentions: bool = False,
+ ):
+ residual = hidden_states
+ hidden_states = self.ln_1(hidden_states)
+ attn_outputs = self.attn(
+ hidden_states=hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ ) # attn_outputs: attn_output, present, (attentions)
+ attn_output = attn_outputs[0]
+ outputs = attn_outputs[1:]
+
+ feed_forward_hidden_states = self.mlp(hidden_states)
+ hidden_states = attn_output + feed_forward_hidden_states + residual
+
+ if use_cache:
+ outputs = (hidden_states,) + outputs
+ else:
+ outputs = (hidden_states,) + outputs[1:]
+ return outputs # hidden_states, present, (attentions)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "ln_1", None) is not None:
+ with tf.name_scope(self.ln_1.name):
+ self.ln_1.build([None, None, self.config.n_embd])
+ if getattr(self, "attn", None) is not None:
+ with tf.name_scope(self.attn.name):
+ self.attn.build(None)
+ if getattr(self, "mlp", None) is not None:
+ with tf.name_scope(self.mlp.name):
+ self.mlp.build(None)
+
+
+@keras_serializable
+class TFGPTJMainLayer(keras.layers.Layer):
+ config_class = GPTJConfig
+
+ def __init__(self, config: GPTJConfig, *inputs, **kwargs):
+ super().__init__(*inputs, **kwargs)
+
+ self.config = config
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.use_cache = config.use_cache
+ self.return_dict = config.use_return_dict
+
+ self.num_hidden_layers = config.n_layer
+ self.n_embd = config.n_embd
+ self.n_positions = config.n_positions
+ self.initializer_range = config.initializer_range
+
+ self.wte = TFSharedEmbeddings(
+ config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte"
+ )
+ self.drop = keras.layers.Dropout(config.embd_pdrop)
+ self.h = [TFGPTJBlock(config, name=f"h_._{i}") for i in range(config.n_layer)]
+ self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
+ self.embed_dim = config.n_embd
+
+ def get_input_embeddings(self):
+ return self.wte
+
+ def set_input_embeddings(self, value: tf.Tensor):
+ self.wte.weight = value
+ self.wte.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ past_key_values=None,
+ attention_mask=None,
+ token_type_ids=None,
+ position_ids=None,
+ head_mask=None,
+ inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if past_key_values is None:
+ past_length = 0
+ past_key_values = [None] * len(self.h)
+ else:
+ past_length = shape_list(past_key_values[0][0])[-2]
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)
+
+ if attention_mask is not None:
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is more simple than the triangular masking of causal attention
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+ attention_mask_shape = shape_list(attention_mask)
+ attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ one_cst = tf.constant(1.0)
+ attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
+ attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.num_hidden_layers
+ # head_mask = tf.constant([0] * self.num_hidden_layers)
+
+ position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.wte.vocab_size)
+ inputs_embeds = self.wte(input_ids, mode="embedding")
+
+ if token_type_ids is not None:
+ token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
+ token_type_embeds = self.wte(token_type_ids, mode="embedding")
+ else:
+ token_type_embeds = tf.constant(0.0)
+
+ token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype)
+ hidden_states = inputs_embeds + token_type_embeds
+ hidden_states = self.drop(hidden_states, training=training)
+
+ output_shape = input_shape + [shape_list(hidden_states)[-1]]
+
+ presents = () if use_cache else None
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
+
+ outputs = block(
+ hidden_states=hidden_states,
+ layer_past=layer_past,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask[i],
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ training=training,
+ )
+
+ hidden_states = outputs[0]
+ if use_cache:
+ presents = presents + (outputs[1],)
+
+ if output_attentions:
+ all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
+
+ hidden_states = self.ln_f(hidden_states)
+
+ hidden_states = tf.reshape(hidden_states, output_shape)
+ # Add last hidden state
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if output_attentions:
+ # let the number of heads free (-1) so we can extract attention even after head pruning
+ attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
+ all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
+
+ return TFBaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "wte", None) is not None:
+ with tf.name_scope(self.wte.name):
+ self.wte.build(None)
+ if getattr(self, "ln_f", None) is not None:
+ with tf.name_scope(self.ln_f.name):
+ self.ln_f.build([None, None, self.embed_dim])
+ if getattr(self, "h", None) is not None:
+ for layer in self.h:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFGPTJPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GPTJConfig
+ base_model_prefix = "transformer"
+ # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias"]
+
+
+GPTJ_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GPTJ_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]` (`sequence_length` of
+ input past key value states). Indices of input sequence tokens in the vocabulary.
+
+ If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ past_key_values (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `past_key_values` output below). Can be used to speed up sequential decoding. The token ids which have their past
+ given to this model should not be passed as input ids as they have already been computed.
+ attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
+ in eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
+ GPTJ_START_DOCSTRING,
+)
+class TFGPTJModel(TFGPTJPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
+ r"""
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`). Set to `False` during training and `True` during generation.
+ """
+
+ outputs = self.transformer(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+
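
Since `GPTJ_INPUTS_DOCSTRING` stresses that tokens whose past is already cached should not be passed again, a short sketch of incremental decoding with `TFGPTJModel` may help. As before, the tiny config is illustrative and the token ids are arbitrary.

```python
import tensorflow as tf
from transformers import GPTJConfig, TFGPTJModel

config = GPTJConfig(n_embd=128, n_layer=2, n_head=4, rotary_dim=32, vocab_size=1000)
model = TFGPTJModel(config)

prompt = tf.constant([[11, 12, 13]])
first = model(prompt, use_cache=True)   # caches key/value states for the 3 prompt tokens

# Feed only the new token; its position index continues after the cached prompt.
next_token = tf.constant([[14]])
second = model(next_token, past_key_values=first.past_key_values, use_cache=True)
print(second.last_hidden_state.shape)   # (1, 1, 128)
```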
+
+@add_start_docstrings(
+ """
+ The GPT-J Model transformer with a language modeling head on top.
+ """,
+ GPTJ_START_DOCSTRING,
+)
+class TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
+ self.lm_head = keras.layers.Dense(
+ config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="lm_head"
+ )
+ self.config = config
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
+ token_type_ids = kwargs.get("token_type_ids", None)
+ # only last token for inputs_ids if past is defined in kwargs
+ if past_key_values:
+ inputs = tf.expand_dims(inputs[:, -1], -1)
+ if token_type_ids is not None:
+ token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
+
+ position_ids = kwargs.get("position_ids", None)
+ attention_mask = kwargs.get("attention_mask", None)
+
+ if attention_mask is not None and position_ids is None:
+ position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
+ if past_key_values:
+ position_ids = tf.expand_dims(position_ids[:, -1], -1)
+
+ return {
+ "input_ids": inputs,
+ "attention_mask": attention_mask,
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ "token_type_ids": token_type_ids,
+ }
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFCausalLMOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
+ r"""
+ labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = transformer_outputs[0]
+ lm_logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # shift labels to the left and cut last logit token
+ shifted_logits = lm_logits[:, :-1]
+ labels = labels[:, 1:]
+ loss = self.hf_compute_loss(labels, shifted_logits)
+
+ if not return_dict:
+ output = (lm_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFCausalLMOutputWithPast(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build([None, None, self.config.n_embd])
+
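
Here is a minimal sketch of the label handling described in the `labels` docstring of `TFGPTJForCausalLM.call`: passing `labels=input_ids` is enough, because the shift happens inside the model. The tiny config and token ids below are placeholders, not values taken from the source.

```python
import tensorflow as tf
from transformers import GPTJConfig, TFGPTJForCausalLM

config = GPTJConfig(n_embd=128, n_layer=2, n_head=4, rotary_dim=32, vocab_size=1000)
model = TFGPTJForCausalLM(config)

input_ids = tf.constant([[11, 12, 13, 14, 15]])
outputs = model(input_ids, labels=input_ids)   # labels are shifted inside the model
print(outputs.loss, outputs.logits.shape)      # loss tensor, logits of shape (1, 5, 1000)
```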
+
+@add_start_docstrings(
+ """
+ The GPT-J Model transformer with a sequence classification head on top (linear layer).
+
+ [`TFGPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT, GPT-2, GPT-Neo) do.
+
+ Since it does classification on the last token, it requires to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ GPTJ_START_DOCSTRING,
+)
+class TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss):
+ _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
+ self.score = keras.layers.Dense(
+ self.num_labels,
+ use_bias=False,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="score",
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]:
+ r"""
+ labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+ logits_shape = shape_list(logits)
+ in_logits = None
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ sequence_lengths = (
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
+ - 1
+ )
+ sequence_lengths = tf.where(
+ sequence_lengths >= 0,
+ sequence_lengths,
+ tf.cast(shape_list(input_ids[-1]), sequence_lengths.dtype) - 1,
+ )
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+ loss = None
+
+ if labels is not None:
+ if self.config.pad_token_id is None and logits_shape[0] != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+
+ if not tf.is_tensor(sequence_lengths):
+ in_logits = logits[0 : logits_shape[0], sequence_lengths]
+
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels]))
+ pooled_logits = in_logits if in_logits is not None else logits
+
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "score", None) is not None:
+ with tf.name_scope(self.score.name):
+ self.score.build([None, None, self.config.n_embd])
+
+
+@add_start_docstrings(
+ """
+ The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ GPTJ_START_DOCSTRING,
+)
+class TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss):
+ _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
+ self.qa_outputs = keras.layers.Dense(
+ self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = transformer_outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+
+ loss = None
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + transformer_outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transformer", None) is not None:
+ with tf.name_scope(self.transformer.name):
+ self.transformer.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0de4a00bd15005fe974f7240b9bc6c940f5b789
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py
@@ -0,0 +1,97 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_groupvit": [
+ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "GroupViTConfig",
+ "GroupViTOnnxConfig",
+ "GroupViTTextConfig",
+ "GroupViTVisionConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_groupvit"] = [
+ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "GroupViTModel",
+ "GroupViTPreTrainedModel",
+ "GroupViTTextModel",
+ "GroupViTVisionModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_groupvit"] = [
+ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFGroupViTModel",
+ "TFGroupViTPreTrainedModel",
+ "TFGroupViTTextModel",
+ "TFGroupViTVisionModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_groupvit import (
+ GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ GroupViTConfig,
+ GroupViTOnnxConfig,
+ GroupViTTextConfig,
+ GroupViTVisionConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_groupvit import (
+ GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ GroupViTModel,
+ GroupViTPreTrainedModel,
+ GroupViTTextModel,
+ GroupViTVisionModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_groupvit import (
+ TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFGroupViTModel,
+ TFGroupViTPreTrainedModel,
+ TFGroupViTTextModel,
+ TFGroupViTVisionModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
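
For context, the `_LazyModule` indirection above means that importing a name from `transformers.models.groupvit` only loads the corresponding submodule on first attribute access. A tiny sketch; the printed value is the `hidden_size` default documented for `GroupViTTextConfig` in the configuration file below.

```python
# The heavy torch/TF modeling files are not imported until one of the exported
# names is actually accessed through the lazy module.
from transformers.models.groupvit import GroupViTTextConfig

config = GroupViTTextConfig()   # resolves configuration_groupvit on first access
print(config.hidden_size)       # 256, the documented default
```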
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfec885244948cdd9fc93ca8af5105ba5c2866e0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py
@@ -0,0 +1,453 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" GroupViT model configuration"""
+
+import os
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+if TYPE_CHECKING:
+ from ...processing_utils import ProcessorMixin
+ from ...utils import TensorType
+
+
+logger = logging.get_logger(__name__)
+
+GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "nvidia/groupvit-gcc-yfcc": "https://huggingface.co/nvidia/groupvit-gcc-yfcc/resolve/main/config.json",
+}
+
+
+class GroupViTTextConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`GroupViTTextModel`]. It is used to instantiate a
+ GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the GroupViT
+ [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 49408):
+ Vocabulary size of the GroupViT text model. Defines the number of different tokens that can be represented
+ by the `input_ids` passed when calling [`GroupViTModel`].
+ hidden_size (`int`, *optional*, defaults to 256):
+ Dimensionality of the encoder layers and the pooler layer.
+ intermediate_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ max_position_embeddings (`int`, *optional*, defaults to 77):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ initializer_factor (`float`, *optional*, defaults to 1.0):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+
+ Example:
+
+ ```python
+ >>> from transformers import GroupViTTextConfig, GroupViTTextModel
+
+ >>> # Initializing a GroupViTTextModel with nvidia/groupvit-gcc-yfcc style configuration
+ >>> configuration = GroupViTTextConfig()
+
+ >>> model = GroupViTTextModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "groupvit_text_model"
+
+ def __init__(
+ self,
+ vocab_size=49408,
+ hidden_size=256,
+ intermediate_size=1024,
+ num_hidden_layers=12,
+ num_attention_heads=4,
+ max_position_embeddings=77,
+ hidden_act="quick_gelu",
+ layer_norm_eps=1e-5,
+ dropout=0.0,
+ attention_dropout=0.0,
+ initializer_range=0.02,
+ initializer_factor=1.0,
+ pad_token_id=1,
+ bos_token_id=49406,
+ eos_token_id=49407,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.dropout = dropout
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.max_position_embeddings = max_position_embeddings
+ self.layer_norm_eps = layer_norm_eps
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.initializer_factor = initializer_factor
+ self.attention_dropout = attention_dropout
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the text config dict if we are loading from GroupViTConfig
+ if config_dict.get("model_type") == "groupvit":
+ config_dict = config_dict["text_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class GroupViTVisionConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate
+ a GroupViT model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the GroupViT
+ [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 384):
+ Dimensionality of the encoder layers and the pooler layer.
+ intermediate_size (`int`, *optional*, defaults to 1536):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ depths (`List[int]`, *optional*, defaults to [6, 3, 3]):
+ The number of layers in each encoder block.
+ num_group_tokens (`List[int]`, *optional*, defaults to [64, 8, 0]):
+ The number of group tokens for each stage.
+ num_output_groups (`List[int]`, *optional*, defaults to [64, 8, 8]):
+ The number of output groups for each stage, 0 means no group.
+ num_attention_heads (`int`, *optional*, defaults to 6):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ initializer_factor (`float`, *optional*, defaults to 1.0):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+
+ Example:
+
+ ```python
+ >>> from transformers import GroupViTVisionConfig, GroupViTVisionModel
+
+ >>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration
+ >>> configuration = GroupViTVisionConfig()
+
+ >>> model = GroupViTVisionModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "groupvit_vision_model"
+
+ def __init__(
+ self,
+ hidden_size=384,
+ intermediate_size=1536,
+ depths=[6, 3, 3],
+ num_hidden_layers=12,
+ num_group_tokens=[64, 8, 0],
+ num_output_groups=[64, 8, 8],
+ num_attention_heads=6,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ hidden_act="gelu",
+ layer_norm_eps=1e-5,
+ dropout=0.0,
+ attention_dropout=0.0,
+ initializer_range=0.02,
+ initializer_factor=1.0,
+ assign_eps=1.0,
+ assign_mlp_ratio=[0.5, 4],
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.depths = depths
+ if num_hidden_layers != sum(depths):
+ logger.warning(
+ f"Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers ="
+ f" sum(depth) = {sum(depths)}"
+ )
+ self.num_hidden_layers = num_hidden_layers
+ self.num_group_tokens = num_group_tokens
+ self.num_output_groups = num_output_groups
+ self.num_attention_heads = num_attention_heads
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.hidden_act = hidden_act
+ self.layer_norm_eps = layer_norm_eps
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.initializer_range = initializer_range
+ self.initializer_factor = initializer_factor
+ self.assign_eps = assign_eps
+ self.assign_mlp_ratio = assign_mlp_ratio
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the vision config dict if we are loading from GroupViTConfig
+ if config_dict.get("model_type") == "groupvit":
+ config_dict = config_dict["vision_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class GroupViTConfig(PretrainedConfig):
+ r"""
+ [`GroupViTConfig`] is the configuration class to store the configuration of a [`GroupViTModel`]. It is used to
+ instantiate a GroupViT model according to the specified arguments, defining the text model and vision model
+ configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT
+ [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ text_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`GroupViTTextConfig`].
+ vision_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`GroupViTVisionConfig`].
+ projection_dim (`int`, *optional*, defaults to 256):
+ Dimensionality of text and vision projection layers.
+ projection_intermediate_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the intermediate layer of the text and vision projection layers.
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
+ The initial value of the *logit_scale* parameter. Default is used as per the original GroupViT
+ implementation.
+ kwargs (*optional*):
+ Dictionary of keyword arguments.
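+
+ Example (a minimal sketch, mirroring the text and vision config examples above):
+
+ ```python
+ >>> from transformers import GroupViTConfig, GroupViTModel
+
+ >>> # Initializing a GroupViTConfig with nvidia/groupvit-gcc-yfcc style configuration
+ >>> configuration = GroupViTConfig()
+
+ >>> model = GroupViTModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+
+ >>> # A GroupViTConfig can also be built from the two sub-configs via `from_text_vision_configs`
+ >>> from transformers import GroupViTTextConfig, GroupViTVisionConfig
+
+ >>> config_text = GroupViTTextConfig()
+ >>> config_vision = GroupViTVisionConfig()
+ >>> config = GroupViTConfig.from_text_vision_configs(config_text, config_vision)
+ ```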
+ """
+
+ model_type = "groupvit"
+
+ def __init__(
+ self,
+ text_config=None,
+ vision_config=None,
+ projection_dim=256,
+ projection_intermediate_dim=4096,
+ logit_scale_init_value=2.6592,
+ **kwargs,
+ ):
+ # If the `_config_dict` kwargs exist, we use them for backward compatibility.
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
+ # of confusion!).
+ text_config_dict = kwargs.pop("text_config_dict", None)
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
+
+ super().__init__(**kwargs)
+
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
+ if text_config_dict is not None:
+ if text_config is None:
+ text_config = {}
+
+ # This is the complete result when using `text_config_dict`.
+ _text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict()
+
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
+ for key, value in _text_config_dict.items():
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ # If specified in `text_config_dict`
+ if key in text_config_dict:
+ message = (
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
+ f'The value `text_config_dict["{key}"]` will be used instead.'
+ )
+ # If inferred from default argument values (just to be super careful)
+ else:
+ message = (
+ f"`text_config_dict` is provided which will be used to initialize `GroupViTTextConfig`. "
+ f'The value `text_config["{key}"]` will be overridden.'
+ )
+ logger.info(message)
+
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
+ text_config.update(_text_config_dict)
+
+ if vision_config_dict is not None:
+ if vision_config is None:
+ vision_config = {}
+
+ # This is the complete result when using `vision_config_dict`.
+ _vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict()
+ # convert keys to string instead of integer
+ if "id2label" in _vision_config_dict:
+ _vision_config_dict["id2label"] = {
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
+ }
+
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but are different.
+ for key, value in _vision_config_dict.items():
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ # If specified in `vision_config_dict`
+ if key in vision_config_dict:
+ message = (
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
+ )
+ # If inferred from default argument values (just to be super careful)
+ else:
+ message = (
+ f"`vision_config_dict` is provided which will be used to initialize `GroupViTVisionConfig`."
+ f' The value `vision_config["{key}"]` will be overridden.'
+ )
+ logger.info(message)
+
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
+ vision_config.update(_vision_config_dict)
+
+ if text_config is None:
+ text_config = {}
+ logger.info("`text_config` is `None`. Initializing the `GroupViTTextConfig` with default values.")
+
+ if vision_config is None:
+ vision_config = {}
+ logger.info("`vision_config` is `None`. initializing the `GroupViTVisionConfig` with default values.")
+
+ self.text_config = GroupViTTextConfig(**text_config)
+ self.vision_config = GroupViTVisionConfig(**vision_config)
+
+ self.projection_dim = projection_dim
+ self.projection_intermediate_dim = projection_intermediate_dim
+ self.logit_scale_init_value = logit_scale_init_value
+ self.initializer_range = 0.02
+ self.initializer_factor = 1.0
+ self.output_segmentation = False
+
+ @classmethod
+ def from_text_vision_configs(cls, text_config: GroupViTTextConfig, vision_config: GroupViTVisionConfig, **kwargs):
+ r"""
+ Instantiate a [`GroupViTConfig`] (or a derived class) from groupvit text model configuration and groupvit
+ vision model configuration.
+
+ Returns:
+ [`GroupViTConfig`]: An instance of a configuration object
+ """
+
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
+
+
+class GroupViTOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "sequence"}),
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ("attention_mask", {0: "batch", 1: "sequence"}),
+ ]
+ )
+
+ @property
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("logits_per_image", {0: "batch"}),
+ ("logits_per_text", {0: "batch"}),
+ ("text_embeds", {0: "batch"}),
+ ("image_embeds", {0: "batch"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
+
+ def generate_dummy_inputs(
+ self,
+ processor: "ProcessorMixin",
+ batch_size: int = -1,
+ seq_length: int = -1,
+ framework: Optional["TensorType"] = None,
+ ) -> Mapping[str, Any]:
+ text_input_dict = super().generate_dummy_inputs(
+ processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
+ )
+ image_input_dict = super().generate_dummy_inputs(
+ processor.image_processor, batch_size=batch_size, framework=framework
+ )
+ return {**text_input_dict, **image_input_dict}
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 14
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..059f10f6129bee62bd62a2c0d75fd1be555d6409
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py
@@ -0,0 +1,217 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Convert GroupViT checkpoints from the original repository.
+
+URL: https://github.com/NVlabs/GroupViT
+"""
+
+import argparse
+
+import requests
+import torch
+from PIL import Image
+
+from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
+
+
+def rename_key(name):
+ # vision encoder
+ if "img_encoder.pos_embed" in name:
+ name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
+ if "img_encoder.patch_embed.proj" in name:
+ name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
+ if "img_encoder.patch_embed.norm" in name:
+ name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
+ if "img_encoder.layers" in name:
+ name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
+ if "blocks" in name and "res" not in name:
+ name = name.replace("blocks", "layers")
+ if "attn" in name and "pre_assign" not in name:
+ name = name.replace("attn", "self_attn")
+ if "proj" in name and "self_attn" in name and "text" not in name:
+ name = name.replace("proj", "out_proj")
+ if "pre_assign_attn.attn.proj" in name:
+ name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
+ if "norm1" in name:
+ name = name.replace("norm1", "layer_norm1")
+ if "norm2" in name and "pre_assign" not in name:
+ name = name.replace("norm2", "layer_norm2")
+ if "img_encoder.norm" in name:
+ name = name.replace("img_encoder.norm", "vision_model.layernorm")
+ # text encoder
+ if "text_encoder.token_embedding" in name:
+ name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
+ if "text_encoder.positional_embedding" in name:
+ name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
+ if "text_encoder.transformer.resblocks." in name:
+ name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
+ if "ln_1" in name:
+ name = name.replace("ln_1", "layer_norm1")
+ if "ln_2" in name:
+ name = name.replace("ln_2", "layer_norm2")
+ if "c_fc" in name:
+ name = name.replace("c_fc", "fc1")
+ if "c_proj" in name:
+ name = name.replace("c_proj", "fc2")
+ if "text_encoder" in name:
+ name = name.replace("text_encoder", "text_model")
+ if "ln_final" in name:
+ name = name.replace("ln_final", "final_layer_norm")
+ # projection layers
+ if "img_projector.linear_hidden." in name:
+ name = name.replace("img_projector.linear_hidden.", "visual_projection.")
+ if "img_projector.linear_out." in name:
+ name = name.replace("img_projector.linear_out.", "visual_projection.3.")
+ if "text_projector.linear_hidden" in name:
+ name = name.replace("text_projector.linear_hidden", "text_projection")
+ if "text_projector.linear_out" in name:
+ name = name.replace("text_projector.linear_out", "text_projection.3")
+
+ return name
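+
+
+# Illustrative sketch of the renaming above, traced through the replacements (hypothetical checkpoint key):
+#   rename_key("img_encoder.layers.0.blocks.0.norm1.weight")
+#   -> "vision_model.encoder.stages.0.layers.0.layer_norm1.weight"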
+
+
+def convert_state_dict(orig_state_dict, config):
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if "qkv" in key:
+ # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
+ # we need to split them up into separate matrices/vectors
+ key_split = key.split(".")
+ stage_num, layer_num = int(key_split[2]), int(key_split[4])
+ dim = config.vision_config.hidden_size
+ if "weight" in key:
+ orig_state_dict[
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
+ ] = val[:dim, :]
+ orig_state_dict[
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
+ ] = val[dim : dim * 2, :]
+ orig_state_dict[
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
+ ] = val[-dim:, :]
+ else:
+ orig_state_dict[
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
+ ] = val[:dim]
+ orig_state_dict[
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
+ ] = val[dim : dim * 2]
+ orig_state_dict[
+ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
+ ] = val[-dim:]
+ elif "in_proj" in key:
+ # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
+ # we need to split them up into separate matrices/vectors
+ key_split = key.split(".")
+ layer_num = int(key_split[3])
+ dim = config.text_config.hidden_size
+ if "weight" in key:
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
+ dim : dim * 2, :
+ ]
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
+ else:
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
+ orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
+ else:
+ new_name = rename_key(key)
+ # squeeze if necessary
+ if (
+ "text_projection.0" in new_name
+ or "text_projection.3" in new_name
+ or "visual_projection.0" in new_name
+ or "visual_projection.3" in new_name
+ ):
+ orig_state_dict[new_name] = val.squeeze_()
+ else:
+ orig_state_dict[new_name] = val
+
+ return orig_state_dict
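+
+
+# Note on the q/k/v split above (sketch): the original checkpoint stores the query/key/value projections fused
+# into a single "qkv"/"in_proj" tensor whose first dimension is 3 * hidden_size; the three hidden_size-sized
+# chunks are copied into the separate q_proj/k_proj/v_proj parameters of the converted model.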
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+@torch.no_grad()
+def convert_groupvit_checkpoint(
+ checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
+):
+ """
+ Copy/paste/tweak model's weights to the Transformers design.
+ """
+ config = GroupViTConfig()
+ model = GroupViTModel(config).eval()
+
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+ new_state_dict = convert_state_dict(state_dict, config)
+ missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
+ assert missing_keys == ["text_model.embeddings.position_ids"]
+ assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
+
+ # verify result
+ processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+ image = prepare_img()
+ inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
+
+ with torch.no_grad():
+ outputs = model(**inputs)
+
+ if model_name == "groupvit-gcc-yfcc":
+ expected_logits = torch.tensor([[13.3523, 6.3629]])
+ elif model_name == "groupvit-gcc-redcaps":
+ expected_logits = torch.tensor([[16.1873, 8.6230]])
+ else:
+ raise ValueError(f"Model name {model_name} not supported.")
+ assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)
+
+ processor.save_pretrained(pytorch_dump_folder_path)
+ model.save_pretrained(pytorch_dump_folder_path)
+ print("Successfully saved processor and model to", pytorch_dump_folder_path)
+
+ if push_to_hub:
+ print("Pushing to the hub...")
+ processor.push_to_hub(model_name, organization="nielsr")
+ model.push_to_hub(model_name, organization="nielsr")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
+ )
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
+ parser.add_argument(
+ "--model_name",
+ default="groupvit-gccy-fcc",
+ type=str,
+ help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
+ )
+ parser.add_argument(
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
+ )
+ args = parser.parse_args()
+
+ convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
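+
+# Example invocation (a sketch with hypothetical local paths; the flags are the ones defined above):
+#   python convert_groupvit_nvlab_to_hf.py \
+#       --checkpoint_path /path/to/original_groupvit_checkpoint.pth \
+#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
+#       --model_name groupvit-gcc-yfcc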
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..d04f9afb7d3599f3b84bf7d8937fb04830dd064e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py
@@ -0,0 +1,2135 @@
+# coding=utf-8
+# Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 GroupViT model."""
+
+
+from __future__ import annotations
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling
+from ...modeling_tf_utils import (
+ TFModelInputType,
+ TFPreTrainedModel,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_tensorflow_probability_available,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+# soft dependency
+if is_tensorflow_probability_available():
+ try:
+ import tensorflow_probability as tfp
+
+ # On the first call, check whether a compatible version of TensorFlow is installed
+ # TensorFlow Probability depends on a recent stable release of TensorFlow
+ _ = tfp.distributions.Normal(loc=0.0, scale=1.0)
+ except ImportError:
+ logger.error(
+ "GroupViT models are not usable since `tensorflow_probability` can't be loaded. "
+ "It seems you have `tensorflow_probability` installed with the wrong tensorflow version."
+ "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability."
+ )
+
+_CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc"
+
+TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "nvidia/groupvit-gcc-yfcc",
+ # See all GroupViT models at https://huggingface.co/models?filter=groupvit
+]
+
+
+LARGE_NEGATIVE = -1e8
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
+
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
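+
+
+# Example (sketch): a padding mask [[1, 1, 0]] is turned into an additive mask that is 0.0 at the first two
+# source positions and LARGE_NEGATIVE at the padded one, tiled to shape (bsz, 1, tgt_len, src_len).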
+
+
+# contrastive loss function, adapted from
+# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
+def contrastive_loss(logits: tf.Tensor) -> tf.Tensor:
+ return tf.math.reduce_mean(
+ keras.metrics.sparse_categorical_crossentropy(
+ y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True
+ )
+ )
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.clip_loss with clip->groupvit
+def groupvit_loss(similarity: tf.Tensor) -> tf.Tensor:
+ caption_loss = contrastive_loss(similarity)
+ image_loss = contrastive_loss(tf.transpose(similarity))
+ return (caption_loss + image_loss) / 2.0
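+
+
+# Sketch of the loss above: for a square image-text similarity matrix the matching pairs sit on the diagonal,
+# so `contrastive_loss` uses `tf.range(batch)` as sparse targets; `groupvit_loss` averages the cross-entropy
+# computed over the rows of the similarity matrix and over the rows of its transpose.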
+
+
+def hard_softmax(logits: tf.Tensor, dim: int) -> tf.Tensor:
+ y_soft = stable_softmax(logits, dim)
+ # Straight through.
+ index = tf.argmax(y_soft, dim)
+ y_hard = tf.one_hot(
+ index,
+ depth=shape_list(logits)[dim],
+ # TensorFlow expects axis to be -1 or between [0, 3). But received: -2
+ # This is why the following code snippet is used.
+ axis=range(len(shape_list(logits)))[dim],
+ dtype=y_soft.dtype,
+ )
+ ret = y_hard - tf.stop_gradient(y_soft) + y_soft
+
+ return ret
+
+
+def gumbel_softmax(logits: tf.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> tf.Tensor:
+ gumbel_dist = tfp.distributions.Gumbel(0.0, 1.0)
+ gumbels = gumbel_dist.sample(tf.shape(logits), dtype=logits.dtype)
+
+ gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
+ y_soft = stable_softmax(gumbels, dim)
+
+ if hard:
+ # Straight through.
+ index = tf.argmax(y_soft, dim)
+ y_hard = tf.one_hot(
+ index,
+ depth=shape_list(logits)[dim],
+ # TensorFlow expects axis to be -1 or between [0, 3). But received: -2
+ # This is why the following code snippet is used.
+ axis=range(len(shape_list(logits)))[dim],
+ dtype=y_soft.dtype,
+ )
+ ret = y_hard - tf.stop_gradient(y_soft) + y_soft
+ else:
+ # Reparametrization trick.
+ ret = y_soft
+ return ret
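+
+
+# Note on the `hard=True` branch above: it is a straight-through estimator. The forward pass uses the one-hot
+# assignment `y_hard`, while `y_hard - tf.stop_gradient(y_soft) + y_soft` lets gradients flow through the
+# soft distribution `y_soft`.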
+
+
+def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool = False) -> tf.Tensor:
+ """
+ Args:
+ attentions (`tf.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]
+ height (`int`): height of the output attention map
+ width (`int`): width of the output attention map
+ align_corners (`bool`, *optional*): the `align_corners` argument for `nn.functional.interpolate`.
+
+ Returns:
+ `tf.Tensor`: resized attention map of shape [batch_size, groups, height, width]
+ """
+
+ scale = (height * width // attentions.shape[2]) ** 0.5
+ if height > width:
+ feat_width = int(np.round(width / scale))
+ feat_height = shape_list(attentions)[2] // feat_width
+ else:
+ feat_height = int(np.round(height / scale))
+ feat_width = shape_list(attentions)[2] // feat_height
+
+ batch_size = shape_list(attentions)[0]
+ groups = shape_list(attentions)[1] # number of group token
+ # [batch_size, groups, height x width] -> [batch_size, groups, height, width]
+ attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width))
+ attentions = tf.transpose(attentions, perm=(0, 2, 3, 1))
+ if align_corners:
+ attentions = tf.compat.v1.image.resize(
+ attentions,
+ size=(height, width),
+ method="bilinear",
+ align_corners=align_corners,
+ )
+ else:
+ attentions = tf.image.resize(attentions, size=(height, width), method="bilinear")
+ attentions = tf.transpose(attentions, perm=(0, 3, 1, 2))
+ return attentions
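+
+
+# Example (sketch, assuming a 224x224 image with patch size 16): `attentions` of shape (batch_size, groups, 196)
+# corresponds to a 14x14 feature grid; it is reshaped to (batch_size, groups, 14, 14) and bilinearly resized to
+# (batch_size, groups, 224, 224).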
+
+
+def get_grouping_from_attentions(attentions: Tuple[tf.Tensor], hw_shape: Tuple[int]) -> tf.Tensor:
+ """
+ Args:
+ attentions (`tuple(tf.Tensor)`): tuple of attention maps returned by `TFGroupViTVisionTransformer`
+ hw_shape (`tuple(int)`): height and width of the output attention map
+ Returns:
+ `tf.Tensor`: the attention map of shape [batch_size, groups, height, width]
+ """
+
+ attn_maps = []
+ prev_attn_masks = None
+ for attn_masks in attentions:
+ # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups]
+ attn_masks = tf.transpose(attn_masks, perm=(0, 2, 1))
+ if prev_attn_masks is None:
+ prev_attn_masks = attn_masks
+ else:
+ prev_attn_masks = tf.matmul(prev_attn_masks, attn_masks)
+ # [batch_size, height x width, num_groups] -> [batch_size, num_groups, height x width] -> [batch_size, num_groups, height, width]
+ cur_attn_map = resize_attention_map(tf.transpose(prev_attn_masks, perm=(0, 2, 1)), *hw_shape)
+ attn_maps.append(cur_attn_map)
+
+ # [batch_size, num_groups, height, width]
+ final_grouping = attn_maps[-1]
+
+ return tf.stop_gradient(final_grouping)
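+
+
+# The chained matmul above composes the per-stage soft assignments, so the final grouping assigns every spatial
+# position of the feature grid to one of the last stage's output groups before being resized to `hw_shape`.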
+
+
+@dataclass
+class TFGroupViTModelOutput(ModelOutput):
+ """
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for image-text similarity.
+ logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
+ similarity scores.
+ logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
+ similarity scores.
+ segmentation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
+ Classification scores for each pixel.
+
+ <Tip warning={true}>
+
+ The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
+ to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the
+ original image size as post-processing. You should always check your logits shape and resize as needed.
+
+ </Tip>
+
+ text_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
+ The text embeddings obtained by applying the projection layer to the pooled output of
+ [`TFGroupViTTextModel`].
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
+ The image embeddings obtained by applying the projection layer to the pooled output of
+ [`TFGroupViTVisionModel`].
+ text_model_output (`TFBaseModelOutputWithPooling`):
+ The output of the [`TFGroupViTTextModel`].
+ vision_model_output (`TFBaseModelOutputWithPooling`):
+ The output of the [`TFGroupViTVisionModel`].
+ """
+
+ loss: tf.Tensor | None = None
+ logits_per_image: tf.Tensor = None
+ logits_per_text: tf.Tensor = None
+ segmentation_logits: tf.Tensor = None
+ text_embeds: tf.Tensor = None
+ image_embeds: tf.Tensor = None
+ text_model_output: TFBaseModelOutputWithPooling = None
+ vision_model_output: TFBaseModelOutputWithPooling = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+class TFGroupViTCrossAttentionLayer(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.attn = TFGroupViTAttention(config, name="attn")
+ self.norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm2")
+ self.mlp = TFGroupViTMLP(config, name="mlp")
+ self.norm_post = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post")
+ self.config = config
+
+ def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False) -> tf.Tensor:
+ x = query
+ x = x + self.attn(query, encoder_hidden_states=key)[0]
+ x = x + self.mlp(self.norm2(x))
+ x = self.norm_post(x)
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attn", None) is not None:
+ with tf.name_scope(self.attn.name):
+ self.attn.build(None)
+ if getattr(self, "norm2", None) is not None:
+ with tf.name_scope(self.norm2.name):
+ self.norm2.build([None, None, self.config.hidden_size])
+ if getattr(self, "mlp", None) is not None:
+ with tf.name_scope(self.mlp.name):
+ self.mlp.build(None)
+ if getattr(self, "norm_post", None) is not None:
+ with tf.name_scope(self.norm_post.name):
+ self.norm_post.build([None, None, self.config.hidden_size])
+
+
+class TFGroupViTAssignAttention(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.scale = config.hidden_size**-0.5
+
+ self.q_proj = keras.layers.Dense(config.hidden_size, name="q_proj")
+ self.k_proj = keras.layers.Dense(config.hidden_size, name="k_proj")
+ self.v_proj = keras.layers.Dense(config.hidden_size, name="v_proj")
+ self.proj = keras.layers.Dense(config.hidden_size, name="proj")
+ self.assign_eps = config.assign_eps
+ self.config = config
+
+ def get_attn(self, attn: tf.Tensor, gumbel: bool = True, hard: bool = True, training: bool = False) -> tf.Tensor:
+ if gumbel and training:
+ attn = gumbel_softmax(attn, dim=-2, hard=hard)
+ else:
+ if hard:
+ attn = hard_softmax(attn, dim=-2)
+ else:
+ attn = stable_softmax(attn, axis=-2)
+
+ return attn
+
+ def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False):
+ value = key
+ # [batch_size, query_length, channels]
+ query = self.q_proj(query)
+
+ # [batch_size, key_length, channels]
+ key = self.k_proj(key)
+
+ # [batch_size, key_length, channels]
+ value = self.v_proj(value)
+
+ # [batch_size, query_length, key_length]
+ raw_attn = tf.matmul(query, key, transpose_b=True) * self.scale
+
+ attn = self.get_attn(raw_attn, training=training)
+ soft_attn = self.get_attn(raw_attn, training=training, gumbel=False, hard=False)
+
+ attn = attn / (tf.math.reduce_sum(attn, axis=-1, keepdims=True) + self.assign_eps)
+
+ out = tf.matmul(attn, value)
+
+ out = self.proj(out)
+
+ return out, soft_attn
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "proj", None) is not None:
+ with tf.name_scope(self.proj.name):
+ self.proj.build([None, None, self.config.hidden_size])
+
+
+class TFGroupViTTokenAssign(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, num_group_token: int, num_output_group: int, **kwargs):
+ super().__init__(**kwargs)
+ self.num_output_group = num_output_group
+ # norm on group_tokens
+ self.norm_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_tokens")
+ assign_mlp_ratio = (
+ config.assign_mlp_ratio
+ if isinstance(config.assign_mlp_ratio, collections.abc.Iterable)
+ else (config.assign_mlp_ratio, config.assign_mlp_ratio)
+ )
+ tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio]
+ self.mlp_inter = TFGroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group, name="mlp_inter")
+ self.norm_post_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post_tokens")
+ # norm on x
+ self.norm_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_x")
+ self.pre_assign_attn = TFGroupViTCrossAttentionLayer(config, name="pre_assign_attn")
+
+ self.assign = TFGroupViTAssignAttention(config, name="assign")
+ self.norm_new_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_new_x")
+ self.mlp_channels = TFGroupViTMLP(
+ config, config.hidden_size, channels_dim, config.hidden_size, name="mlp_channels"
+ )
+ self.config = config
+
+ def project_group_token(self, group_tokens: tf.Tensor) -> tf.Tensor:
+ """
+ Args:
+ group_tokens (tf.Tensor): group tokens, [batch_size, num_group_tokens, channels]
+
+ Returns:
+ projected_group_tokens (tf.Tensor): [batch_size, num_output_groups, channels]
+ """
+ # [B, num_output_groups, C] <- [B, num_group_tokens, C]
+ projected_group_tokens = self.mlp_inter(group_tokens)
+ projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
+ return projected_group_tokens
+
+ def call(self, image_tokens: tf.Tensor, group_tokens: tf.Tensor, training: bool = False):
+ """
+ Args:
+ image_tokens (`tf.Tensor`): image tokens, of shape [batch_size, input_length, channels]
+ group_tokens (`tf.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
+ """
+
+ group_tokens = self.norm_tokens(group_tokens)
+ image_tokens = self.norm_x(image_tokens)
+ # [batch_size, num_output_groups, channels]
+ projected_group_tokens = self.project_group_token(group_tokens)
+ projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)
+ new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)
+ new_image_tokens += projected_group_tokens
+
+ new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))
+
+ return new_image_tokens, attention
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "norm_tokens", None) is not None:
+ with tf.name_scope(self.norm_tokens.name):
+ self.norm_tokens.build([None, None, self.config.hidden_size])
+ if getattr(self, "mlp_inter", None) is not None:
+ with tf.name_scope(self.mlp_inter.name):
+ self.mlp_inter.build(None)
+ if getattr(self, "norm_post_tokens", None) is not None:
+ with tf.name_scope(self.norm_post_tokens.name):
+ self.norm_post_tokens.build([None, None, self.config.hidden_size])
+ if getattr(self, "norm_x", None) is not None:
+ with tf.name_scope(self.norm_x.name):
+ self.norm_x.build([None, None, self.config.hidden_size])
+ if getattr(self, "pre_assign_attn", None) is not None:
+ with tf.name_scope(self.pre_assign_attn.name):
+ self.pre_assign_attn.build(None)
+ if getattr(self, "assign", None) is not None:
+ with tf.name_scope(self.assign.name):
+ self.assign.build(None)
+ if getattr(self, "norm_new_x", None) is not None:
+ with tf.name_scope(self.norm_new_x.name):
+ self.norm_new_x.build([None, None, self.config.hidden_size])
+ if getattr(self, "mlp_channels", None) is not None:
+ with tf.name_scope(self.mlp_channels.name):
+ self.mlp_channels.build(None)
+
+
+# Adapted from transformers.models.vit.modeling_tf_vit.TFViTPatchEmbeddings with ViT->GroupViT
+class TFGroupViTPatchEmbeddings(keras.layers.Layer):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config: GroupViTConfig, **kwargs):
+ super().__init__(**kwargs)
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels = config.num_channels
+ # hidden_size is a member as it will be required in the call method
+ self.hidden_size = config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_patches = num_patches
+ self.num_channels = num_channels
+ self.config = config
+
+ self.projection = keras.layers.Conv2D(
+ filters=self.hidden_size,
+ kernel_size=patch_size,
+ strides=patch_size,
+ padding="valid",
+ data_format="channels_last",
+ use_bias=True,
+ kernel_initializer=get_initializer(self.config.initializer_range),
+ bias_initializer="zeros",
+ name="projection",
+ )
+
+ def call(
+ self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
+ ) -> tf.Tensor:
+ batch_size, num_channels, height, width = shape_list(pixel_values)
+ if tf.executing_eagerly() and num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ if (
+ not interpolate_pos_encoding
+ and tf.executing_eagerly()
+ and (height != self.image_size[0] or width != self.image_size[1])
+ ):
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
+ )
+
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
+ # So change the input format from `NCHW` to `NHWC`.
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
+
+ projection = self.projection(pixel_values)
+
+ # Change the 2D spatial dimensions to a single temporal dimension.
+ # shape = (batch_size, num_patches, out_channels=embed_dim)
+ num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0])
+ # In the TFGroupViTVisionEmbeddings the embeddings from this layer will be layer normalized
+ # LayerNormalization layer needs to have static last dimension (otherwise the test_keras_save_load fails with symbolic tensors)
+ # This is why we have used the hidden_size in the reshape method
+ embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, self.hidden_size))
+
+ return embeddings
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "projection", None) is not None:
+ with tf.name_scope(self.projection.name):
+ self.projection.build([None, None, None, self.num_channels])
+
+
+# Adapted from transformers.models.vit.modeling_tf_vit.TFViTEmbeddings
+class TFGroupViTVisionEmbeddings(keras.layers.Layer):
+ """
+ Construct the position and patch embeddings.
+
+ """
+
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.patch_embeddings = TFGroupViTPatchEmbeddings(config, name="patch_embeddings")
+ self.dropout = keras.layers.Dropout(rate=config.dropout, name="dropout")
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+ self.config = config
+
+ def build(self, input_shape=None):
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = self.add_weight(
+ shape=(1, num_patches, self.config.hidden_size),
+ initializer="zeros",
+ trainable=True,
+ name="position_embeddings",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "patch_embeddings", None) is not None:
+ with tf.name_scope(self.patch_embeddings.name):
+ self.patch_embeddings.build(None)
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, self.config.hidden_size])
+
+ def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor:
+ """
+ This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
+ images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
+ batch_size, num_patches, dim = shape_list(embeddings)
+ num_positions = shape_list(self.position_embeddings)[1]
+
+ if num_patches == num_positions and height == width:
+ return self.position_embeddings
+ patch_pos_embed = self.position_embeddings
+ h0 = height // self.config.patch_size
+ w0 = width // self.config.patch_size
+ patch_pos_embed = tf.image.resize(
+ images=tf.reshape(
+ patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
+ ),
+ size=(h0, w0),
+ method="bicubic",
+ )
+ patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim))
+ return patch_pos_embed
+
+ def call(
+ self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
+ ) -> tf.Tensor:
+ _, _, height, width = shape_list(pixel_values)
+ embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+ embeddings = self.layernorm(embeddings)
+
+ # add positional encoding to each token
+ if interpolate_pos_encoding:
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ embeddings = embeddings + self.position_embeddings
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextEmbeddings with CLIP->GroupViT
+class TFGroupViTTextEmbeddings(keras.layers.Layer):
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embed_dim = config.hidden_size
+
+ self.config = config
+
+ def build(self, input_shape: tf.TensorShape = None):
+ with tf.name_scope("token_embedding"):
+ self.weight = self.add_weight(
+ shape=(self.config.vocab_size, self.embed_dim),
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
+ trainable=True,
+ name="weight",
+ )
+
+ with tf.name_scope("position_embedding"):
+ self.position_embedding = self.add_weight(
+ shape=(self.config.max_position_embeddings, self.embed_dim),
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
+ trainable=True,
+ name="embeddings",
+ )
+
+ super().build(input_shape)
+
+ def call(
+ self,
+ input_ids: tf.Tensor = None,
+ position_ids: tf.Tensor = None,
+ inputs_embeds: tf.Tensor = None,
+ ) -> tf.Tensor:
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+ position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
+ position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
+ final_embeddings = inputs_embeds + position_embeds
+
+ return final_embeddings
+
+
+class TFGroupViTStage(keras.layers.Layer):
+ """This corresponds to the `GroupingLayer` class in the GroupViT implementation."""
+
+ def __init__(
+ self,
+ config: GroupViTVisionConfig,
+ depth: int,
+ num_prev_group_token: int,
+ num_group_token: int,
+ num_output_group: int,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.config = config
+ self.depth = depth
+ self.num_group_token = num_group_token
+ self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(depth)]
+
+ if num_group_token > 0:
+ self.downsample = TFGroupViTTokenAssign(
+ config=config,
+ num_group_token=num_group_token,
+ num_output_group=num_output_group,
+ name="downsample",
+ )
+ else:
+ self.downsample = None
+
+ if num_prev_group_token > 0 and num_group_token > 0:
+ self.group_projector = [
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="group_projector.0"),
+ TFGroupViTMixerMLP(
+ config, num_prev_group_token, config.hidden_size // 2, num_group_token, name="group_projector.1"
+ ),
+ ]
+ else:
+ self.group_projector = None
+
+ def build(self, input_shape=None):
+ if self.num_group_token > 0:
+ self.group_token = self.add_weight(
+ shape=(1, self.num_group_token, self.config.hidden_size),
+ initializer="zeros",
+ trainable=True,
+ name="group_token",
+ )
+ else:
+ self.group_token = None
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "downsample", None) is not None:
+ with tf.name_scope(self.downsample.name):
+ self.downsample.build(None)
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+ if getattr(self, "group_projector", None) is not None:
+ with tf.name_scope(self.group_projector[0].name):
+ self.group_projector[0].build([None, None, self.config.hidden_size])
+ with tf.name_scope(self.group_projector[1].name):
+ self.group_projector[1].build(None)
+
+ @property
+ def with_group_token(self):
+ return self.group_token is not None
+
+ def split_x(self, x: tf.Tensor) -> tf.Tensor:
+ if self.with_group_token:
+ return x[:, : -self.num_group_token], x[:, -self.num_group_token :]
+ else:
+ return x, None
+
+ def concat_x(self, x: tf.Tensor, group_token: tf.Tensor | None = None) -> tf.Tensor:
+ if group_token is None:
+ return x
+ return tf.concat([x, group_token], axis=1)
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ prev_group_token: tf.Tensor | None = None,
+ output_attentions: bool = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ prev_group_token (`tf.Tensor`, *optional*): group tokens from the previous stage, of shape
+ `(batch, num_prev_group_tokens, embed_dim)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the grouping tensors of Grouping block.
+ """
+ if self.with_group_token:
+ group_token = tf.tile(self.group_token, multiples=(shape_list(hidden_states)[0], 1, 1))
+ if self.group_projector is not None:
+ for layer in self.group_projector:
+ prev_group_token = layer(prev_group_token)
+ group_token = group_token + prev_group_token
+ else:
+ group_token = None
+
+ x = hidden_states
+
+ cat_x = self.concat_x(x, group_token)
+ for layer in self.layers:
+ layer_out = layer(
+ cat_x,
+ attention_mask=None,
+ causal_attention_mask=None,
+ output_attentions=None,
+ )
+ cat_x = layer_out[0]
+
+ x, group_token = self.split_x(cat_x)
+
+ attention = None
+ if self.downsample is not None:
+ x, attention = self.downsample(x, group_token)
+
+ outputs = (x, group_token)
+ if output_attentions:
+ outputs = outputs + (attention,)
+
+ return outputs
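+
+
+# Summary of the stage above (descriptive note): learnable group tokens are concatenated to the image tokens,
+# the concatenation is processed jointly by the encoder layers, the group tokens are split off again, and
+# `downsample` (a `TFGroupViTTokenAssign`) uses them to pool the image tokens into a smaller set of output groups.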
+
+
+class TFGroupViTMLP(keras.layers.Layer):
+ def __init__(
+ self,
+ config: GroupViTVisionConfig,
+ hidden_size: Optional[int] = None,
+ intermediate_size: Optional[int] = None,
+ output_size: Optional[int] = None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.config = config
+ self.activation_fn = get_tf_activation(config.hidden_act)
+ hidden_size = hidden_size if hidden_size is not None else config.hidden_size
+ intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
+ output_size = output_size if output_size is not None else hidden_size
+ self.fc1 = keras.layers.Dense(intermediate_size, name="fc1")
+ self.fc2 = keras.layers.Dense(output_size, name="fc2")
+ self.intermediate_size = intermediate_size
+ self.hidden_size = hidden_size
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.hidden_size])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.intermediate_size])
+
+
+class TFGroupViTMixerMLP(TFGroupViTMLP):
+ def call(self, x, training: bool = False):
+ x = super().call(hidden_states=tf.transpose(x, perm=(0, 2, 1)))
+ return tf.transpose(x, perm=(0, 2, 1))
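+
+
+# `TFGroupViTMixerMLP` applies the shared MLP along the token dimension (MLP-Mixer style token mixing): the
+# transposes move tokens to the last axis so `fc1`/`fc2` can project the number of tokens, e.g. from
+# `num_group_token` to `num_output_group` in `TFGroupViTTokenAssign.mlp_inter`.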
+
+
+# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPAttention
+class TFGroupViTAttention(keras.layers.Layer):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: GroupViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embed_dim = config.hidden_size
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = self.embed_dim // self.num_attention_heads
+ if self.attention_head_size * self.num_attention_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_attention_heads})."
+ )
+
+ factor = config.initializer_factor
+ in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor
+ out_proj_std = (self.embed_dim**-0.5) * factor
+
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
+
+ self.q_proj = keras.layers.Dense(
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj"
+ )
+ self.k_proj = keras.layers.Dense(
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj"
+ )
+ self.v_proj = keras.layers.Dense(
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj"
+ )
+
+ self.dropout = keras.layers.Dropout(rate=config.attention_dropout)
+
+ self.out_proj = keras.layers.Dense(
+ units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj"
+ )
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
+
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor = None,
+ causal_attention_mask: tf.Tensor = None,
+ output_attentions: bool = None,
+ encoder_hidden_states: tf.Tensor = None,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ """Input shape: Batch x Time x Channel"""
+
+ batch_size = shape_list(hidden_states)[0]
+ is_cross_attention = encoder_hidden_states is not None
+
+ mixed_query_layer = self.q_proj(inputs=hidden_states)
+ if is_cross_attention:
+ mixed_key_layer = self.k_proj(inputs=encoder_hidden_states)
+ mixed_value_layer = self.v_proj(inputs=encoder_hidden_states)
+ else:
+ mixed_key_layer = self.k_proj(inputs=hidden_states)
+ mixed_value_layer = self.v_proj(inputs=hidden_states)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ # (batch size, num_heads, seq_len_q, seq_len_k)
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
+ attention_scores = tf.divide(attention_scores, dk)
+
+ # apply the causal_attention_mask first
+ if causal_attention_mask is not None:
+ # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function)
+ attention_scores = tf.add(attention_scores, causal_attention_mask)
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function)
+ attention_scores = tf.add(attention_scores, attention_mask)
+
+ # Normalize the attention scores to probabilities.
+ _attention_probs = stable_softmax(logits=attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(inputs=_attention_probs)
+
+ attention_output = tf.matmul(attention_probs, value_layer)
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
+
+ # (batch_size, seq_len_q, embed_dim)
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim))
+
+ attention_output = self.out_proj(attention_output)
+ # In TFBert, attention weights are returned after dropout.
+ # However, in CLIP, they are returned before dropout.
+ outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPEncoderLayer with CLIP->GroupViT
+class TFGroupViTEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: GroupViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embed_dim = config.hidden_size
+ self.self_attn = TFGroupViTAttention(config, name="self_attn")
+ self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
+ self.mlp = TFGroupViTMLP(config, name="mlp")
+ self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ causal_attention_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ causal_attention_mask (`tf.Tensor`): causal attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`):
+ Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned
+ tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(inputs=hidden_states)
+ attention_outputs = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = attention_outputs[0]
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(inputs=hidden_states)
+ hidden_states = self.mlp(hidden_states=hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,) + attention_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "layer_norm1", None) is not None:
+ with tf.name_scope(self.layer_norm1.name):
+ self.layer_norm1.build([None, None, self.embed_dim])
+ if getattr(self, "mlp", None) is not None:
+ with tf.name_scope(self.mlp.name):
+ self.mlp.build(None)
+ if getattr(self, "layer_norm2", None) is not None:
+ with tf.name_scope(self.layer_norm2.name):
+ self.layer_norm2.build([None, None, self.embed_dim])
+
+
+# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPEncoder
+class TFGroupViTTextEncoder(keras.layers.Layer):
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask: tf.Tensor,
+ causal_attention_mask: tf.Tensor,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[Tuple, TFBaseModelOutput]:
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFGroupViTVisionEncoder(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, **kwargs) -> None:
+ super().__init__(**kwargs)
+
+ self.stages = [
+ TFGroupViTStage(
+ config=config,
+ depth=config.depths[i],
+ num_group_token=config.num_group_tokens[i],
+ num_output_group=config.num_output_groups[i],
+ num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0,
+ name=f"stages_._{i}",
+ )
+ for i in range(len(config.depths))
+ ]
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ output_hidden_states: bool,
+ output_attentions: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[tuple, TFBaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_groupings = () if output_attentions else None
+
+ group_tokens = None
+
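+ # each stage consumes the group tokens emitted by the previous stage (`None` for the first stage)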
+ for stage in self.stages:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = stage(hidden_states, group_tokens, output_attentions)
+
+ hidden_states = layer_outputs[0]
+ group_tokens = layer_outputs[1]
+
+ if output_attentions and layer_outputs[2] is not None:
+ all_groupings = all_groupings + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "stages", None) is not None:
+ for layer in self.stages:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder
+class TFGroupViTTextTransformer(keras.layers.Layer):
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embeddings = TFGroupViTTextEmbeddings(config, name="embeddings")
+ self.encoder = TFGroupViTTextEncoder(config, name="encoder")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
+
+ # For `pooled_output` computation
+ self.eos_token_id = config.eos_token_id
+ self.embed_dim = config.hidden_size
+
+ def call(
+ self,
+ input_ids: TFModelInputType,
+ attention_mask: tf.Tensor,
+ position_ids: tf.Tensor,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ input_shape = shape_list(input_ids)
+
+ embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids)
+
+ batch_size, seq_length = input_shape
+ # CLIP's text model uses causal mask, prepare it here.
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
+ causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype)
+
+ # check attention mask and invert
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _expand_mask(attention_mask)
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.final_layer_norm(inputs=sequence_output)
+
+ if self.eos_token_id == 2:
+ # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
+ # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
+ # ------------------------------------------------------------
+ # text_embeds.shape = [batch_size, n_ctx, transformer.width]
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
+ pooled_output = tf.gather_nd(
+ params=sequence_output,
+ indices=tf.stack(
+ values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1
+ ),
+ )
+ else:
+ # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
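+ # i.e. pool at the position of the first `eos_token_id` in each sequence, located via argmax over the
+ # boolean `input_ids == self.eos_token_id` mask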
+ pooled_output = tf.gather_nd(
+ params=sequence_output,
+ indices=tf.stack(
+ values=(
+ tf.range(input_shape[0], dtype=tf.int64),
+ tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1),
+ ),
+ axis=1,
+ ),
+ )
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32):
+ # It is possible with an unspecified sequence length for seq_length to be
+ # a runtime value, which is unsupported by tf.constant. Per the TensorFlow
+ # docs, tf.fill can handle runtime dynamic shapes:
+ # https://www.tensorflow.org/api_docs/python/tf/fill
+ diag = tf.cast(tf.fill((seq_length,), 0.0), dtype)
+
+ # set an additive 2D attention mask with all places being masked
+ to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype)
+
+ # set diagonal & lower triangular parts to 0 (i.e. the places not to be masked)
+ # TIP: think of the 2D matrix as the space of (query_seq, key_seq)
+ to_mask = tf.linalg.band_part(to_mask, 0, -1)
+ # to_mask = tf.linalg.band_part(to_mask, -1, 0)
+ to_mask = tf.linalg.set_diag(to_mask, diagonal=diag)
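+ # e.g. for seq_length == 3 the additive mask (before broadcasting) is:
+ #   [[     0., -10000., -10000.],
+ #    [     0.,      0., -10000.],
+ #    [     0.,      0.,      0.]]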
+
+ return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length))
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPVisionTransformer
+class TFGroupViTVisionTransformer(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embeddings = TFGroupViTVisionEmbeddings(config, name="embeddings")
+ self.encoder = TFGroupViTVisionEncoder(config, name="encoder")
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+ self.embed_dim = config.hidden_size
+
+ def call(
+ self,
+ pixel_values: TFModelInputType,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
+ embedding_output = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ # normalize the last hidden state
+ last_hidden_state = self.layernorm(last_hidden_state)
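+ # mean-pool over the token dimension to obtain a single image-level representation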
+ pooled_output = tf.math.reduce_mean(last_hidden_state, axis=1)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, self.embed_dim])
+
+
+@keras_serializable
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextMainLayer with CLIP->GroupViT
+class TFGroupViTTextMainLayer(keras.layers.Layer):
+ config_class = GroupViTTextConfig
+
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.text_model = TFGroupViTTextTransformer(config, name="text_model")
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.text_model.embeddings
+
+ def set_input_embeddings(self, value: tf.Variable):
+ self.text_model.embeddings.weight = value
+ self.text_model.embeddings.vocab_size = shape_list(value)[0]
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ if input_ids is None:
+ raise ValueError("You have to specify input_ids")
+
+ input_shape = shape_list(input_ids)
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+
+ text_model_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return text_model_outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "text_model", None) is not None:
+ with tf.name_scope(self.text_model.name):
+ self.text_model.build(None)
+
+
+@keras_serializable
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPVisionMainLayer with CLIP->GroupViT
+class TFGroupViTVisionMainLayer(keras.layers.Layer):
+ config_class = GroupViTVisionConfig
+
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.vision_model = TFGroupViTVisionTransformer(config, name="vision_model")
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.vision_model.embeddings
+
+ @unpack_inputs
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ vision_model_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return vision_model_outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "vision_model", None) is not None:
+ with tf.name_scope(self.vision_model.name):
+ self.vision_model.build(None)
+
+
+@keras_serializable
+# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPMainLayer
+class TFGroupViTMainLayer(keras.layers.Layer):
+ config_class = GroupViTConfig
+
+ def __init__(self, config: GroupViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ if not isinstance(config.text_config, GroupViTTextConfig):
+ raise ValueError(
+ "config.text_config is expected to be of type GroupViTTextConfig but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.vision_config, GroupViTVisionConfig):
+ raise ValueError(
+ "config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
+ f" {type(config.vision_config)}."
+ )
+
+ self.config = config
+
+ text_config = config.text_config
+ vision_config = config.vision_config
+
+ self.projection_dim = config.projection_dim
+ self.projection_intermediate_dim = config.projection_intermediate_dim
+ self.text_embed_dim = text_config.hidden_size
+ self.vision_embed_dim = vision_config.hidden_size
+
+ self.text_model = TFGroupViTTextTransformer(text_config, name="text_model")
+ self.vision_model = TFGroupViTVisionTransformer(vision_config, name="vision_model")
+
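+ # each projection head is a two-layer MLP (Dense -> BatchNorm -> ReLU -> Dense); it is kept as a plain Python
+ # list so that the sublayer names line up with the `visual_projection.{0..3}` / `text_projection.{0..3}`
+ # parameter names of the original (PyTorch) GroupViT implementation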
+ self.visual_projection = [
+ keras.layers.Dense(self.projection_intermediate_dim, name="visual_projection.0"),
+ keras.layers.BatchNormalization(name="visual_projection.1", momentum=0.9, epsilon=1e-5),
+ keras.layers.ReLU(name="visual_projection.2"),
+ keras.layers.Dense(self.projection_dim, name="visual_projection.3"),
+ ]
+ self.text_projection = [
+ keras.layers.Dense(self.projection_intermediate_dim, name="text_projection.0"),
+ keras.layers.BatchNormalization(name="text_projection.1", momentum=0.9, epsilon=1e-5),
+ keras.layers.ReLU(name="text_projection.2"),
+ keras.layers.Dense(self.projection_dim, name="text_projection.3"),
+ ]
+
+ def build(self, input_shape=None):
+ self.logit_scale = self.add_weight(
+ shape=(1,),
+ initializer=keras.initializers.Constant(self.config.logit_scale_init_value),
+ trainable=True,
+ name="logit_scale",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "text_model", None) is not None:
+ with tf.name_scope(self.text_model.name):
+ self.text_model.build(None)
+ if getattr(self, "vision_model", None) is not None:
+ with tf.name_scope(self.vision_model.name):
+ self.vision_model.build(None)
+ if getattr(self, "visual_projection", None) is not None:
+ with tf.name_scope(self.visual_projection[0].name):
+ self.visual_projection[0].build([None, None, None, self.vision_embed_dim])
+ with tf.name_scope(self.visual_projection[1].name):
+ self.visual_projection[1].build((None, self.projection_intermediate_dim))
+ with tf.name_scope(self.visual_projection[3].name):
+ self.visual_projection[3].build([None, None, None, self.projection_intermediate_dim])
+ if getattr(self, "text_projection", None) is not None:
+ with tf.name_scope(self.text_projection[0].name):
+ self.text_projection[0].build([None, None, None, self.text_embed_dim])
+ with tf.name_scope(self.text_projection[1].name):
+ self.text_projection[1].build((None, self.projection_intermediate_dim))
+ with tf.name_scope(self.text_projection[3].name):
+ self.text_projection[3].build([None, None, None, self.projection_intermediate_dim])
+
+ @unpack_inputs
+ def get_text_features(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ if input_ids is None:
+ raise ValueError("You have to specify either input_ids")
+
+ input_shape = shape_list(input_ids)
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ pooled_output = text_outputs[1]
+ for layer in self.text_projection:
+ pooled_output = layer(pooled_output)
+
+ text_features = pooled_output
+ return text_features
+
+ @unpack_inputs
+ def get_image_features(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ pooled_output = vision_outputs[1]
+ for layer in self.visual_projection:
+ pooled_output = layer(pooled_output)
+
+ image_features = pooled_output
+ return image_features
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ pixel_values: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_segmentation: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]:
+ if input_ids is None:
+ raise ValueError("You have to specify either input_ids")
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ input_shape = shape_list(input_ids)
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+ if output_segmentation:
+ output_attentions = True
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ image_embeds = vision_outputs[1]
+ for layer in self.visual_projection:
+ image_embeds = layer(image_embeds)
+
+ text_embeds = text_outputs[1]
+ for layer in self.text_projection:
+ text_embeds = layer(text_embeds)
+
+ # normalized features
+ image_embeds = image_embeds / tf.norm(image_embeds, axis=-1, keepdims=True)
+ text_embeds = text_embeds / tf.norm(text_embeds, axis=-1, keepdims=True)
+
+ # cosine similarity as logits
+ logit_scale = tf.math.exp(self.logit_scale)
+ logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
+ logits_per_image = tf.transpose(logits_per_text)
+
+ seg_logits = None
+ if output_segmentation:
+ # grouped features
+ # [batch_size_image, num_group, hidden_size]
+ image_group_embeds = vision_outputs[0]
+ # [batch_size_image*num_group, hidden_size]
+ image_group_embeds = tf.reshape(image_group_embeds, shape=(-1, shape_list(image_group_embeds)[-1]))
+ for layer in self.visual_projection:
+ image_group_embeds = layer(image_group_embeds)
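+ # the vision outputs are ordered as (last_hidden_state, pooled_output, [hidden_states], groupings),
+ # so the grouping attentions sit at index 3 when hidden states were requested and at index 2 otherwise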
+ if output_hidden_states:
+ attentions = vision_outputs[3]
+ else:
+ attentions = vision_outputs[2]
+ # [batch_size_image, num_group, height, width]
+ grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])
+
+ # normalized features
+ image_group_embeds = image_group_embeds / tf.norm(
+ tensor=image_group_embeds, ord="euclidean", axis=-1, keepdims=True
+ )
+ # [batch_size_image x num_group, batch_size_text]
+ logits_per_image_group = tf.matmul(image_group_embeds, text_embeds, transpose_b=True) * logit_scale
+ # [batch_size_image, batch_size_text, num_group]
+ logits_per_image_group = tf.reshape(
+ logits_per_image_group, shape=(image_embeds.shape[0], -1, text_embeds.shape[0])
+ )
+ logits_per_image_group = tf.transpose(logits_per_image_group, perm=(0, 2, 1))
+
+ # [batch_size_image, batch_size_text, height x width]
+ flatten_grouping = tf.reshape(grouping, shape=(shape_list(grouping)[0], shape_list(grouping)[1], -1))
+
+ # [batch_size_image, batch_size_text, height, width]
+ seg_logits = tf.matmul(logits_per_image_group, flatten_grouping) * logit_scale
+ seg_logits = tf.reshape(
+ seg_logits, shape=(seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3])
+ )
+
+ loss = None
+ if return_loss:
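+ # `groupvit_loss` returns a scalar; `[None, ...]` adds a leading axis so the returned loss has shape (1,)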
+ loss = groupvit_loss(logits_per_text)[None, ...]
+
+ if not return_dict:
+ if seg_logits is not None:
+ output = (
+ logits_per_image,
+ logits_per_text,
+ seg_logits,
+ text_embeds,
+ image_embeds,
+ text_outputs,
+ vision_outputs,
+ )
+ else:
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return TFGroupViTModelOutput(
+ loss=loss,
+ logits_per_image=logits_per_image,
+ logits_per_text=logits_per_text,
+ segmentation_logits=seg_logits,
+ text_embeds=text_embeds,
+ image_embeds=image_embeds,
+ text_model_output=text_outputs,
+ vision_model_output=vision_outputs,
+ )
+
+
+class TFGroupViTPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GroupViTConfig
+ base_model_prefix = "groupvit"
+
+
+GROUPVIT_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+ <Tip>
+
+ TF 2.0 models accept two formats as inputs:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ This second option is useful when using the [`keras.Model.fit`] method, which currently requires having all the
+ tensors in the first argument of the model call function: `model(inputs)`.
+
+ If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
+ first positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ </Tip>
+
+ Args:
+ config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GROUPVIT_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+GROUPVIT_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`CLIPImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+GROUPVIT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`CLIPImageProcessor.__call__`] for details.
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+class TFGroupViTTextModel(TFGroupViTPreTrainedModel):
+ config_class = GroupViTTextConfig
+ main_input_name = "input_ids"
+
+ def __init__(self, config: GroupViTTextConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.groupvit = TFGroupViTTextMainLayer(config, name="groupvit")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTTextConfig)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import CLIPTokenizer, TFGroupViTTextModel
+
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> model = TFGroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
+ ```"""
+
+ outputs = self.groupvit(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "groupvit", None) is not None:
+ with tf.name_scope(self.groupvit.name):
+ self.groupvit.build(None)
+
+
+class TFGroupViTVisionModel(TFGroupViTPreTrainedModel):
+ config_class = GroupViTVisionConfig
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: GroupViTVisionConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.groupvit = TFGroupViTVisionMainLayer(config, name="groupvit")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFGroupViTVisionModel
+
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> model = TFGroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="tf")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
+ ```"""
+
+ outputs = self.groupvit(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "groupvit", None) is not None:
+ with tf.name_scope(self.groupvit.name):
+ self.groupvit.build(None)
+
+
+@add_start_docstrings(GROUPVIT_START_DOCSTRING)
+class TFGroupViTModel(TFGroupViTPreTrainedModel):
+ config_class = GroupViTConfig
+
+ def __init__(self, config: GroupViTConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.groupvit = TFGroupViTMainLayer(config, name="groupvit")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def get_text_features(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ r"""
+ Returns:
+ text_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
+ the projection layer to the pooled output of [`TFGroupViTTextModel`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import CLIPTokenizer, TFGroupViTModel
+
+ >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
+ >>> text_features = model.get_text_features(**inputs)
+ ```"""
+
+ text_features = self.groupvit.get_text_features(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return text_features
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ def get_image_features(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ r"""
+ Returns:
+ image_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by applying
+ the projection layer to the pooled output of [`TFGroupViTVisionModel`].
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFGroupViTModel
+
+ >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="tf")
+
+ >>> image_features = model.get_image_features(**inputs)
+ ```"""
+
+ image_features = self.groupvit.get_image_features(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return image_features
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFGroupViTModelOutput, config_class=GroupViTConfig)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ pixel_values: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_segmentation: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFGroupViTModel
+ >>> import tensorflow as tf
+
+ >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
+ ... )
+
+ >>> outputs = model(**inputs)
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
+ >>> probs = tf.math.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities
+ ```"""
+
+ outputs = self.groupvit(
+ input_ids=input_ids,
+ pixel_values=pixel_values,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ return_loss=return_loss,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_segmentation=output_segmentation,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def serving_output(self, output: TFGroupViTModelOutput) -> TFGroupViTModelOutput:
+ # TODO: As is this currently fails with saved_model=True, because
+ # TensorFlow cannot trace through nested dataclasses. Reference:
+ # https://github.com/huggingface/transformers/pull/16886
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "groupvit", None) is not None:
+ with tf.name_scope(self.groupvit.name):
+ self.groupvit.build(None)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5262941cb0e5ceca0e13fe86ee17b638c11b7ea
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__init__.py
@@ -0,0 +1,114 @@
+# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_sentencepiece_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
+}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_llama"] = [
+ "LlamaForCausalLM",
+ "LlamaModel",
+ "LlamaPreTrainedModel",
+ "LlamaForSequenceClassification",
+ "LlamaForQuestionAnswering",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_llama"] = ["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"]
+
+
+if TYPE_CHECKING:
+ from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_llama import LlamaTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_llama_fast import LlamaTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_llama import (
+ LlamaForCausalLM,
+ LlamaForQuestionAnswering,
+ LlamaForSequenceClassification,
+ LlamaModel,
+ LlamaPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_llama import FlaxLlamaForCausalLM, FlaxLlamaModel, FlaxLlamaPreTrainedModel
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ccc9f8e86927d46e760f6d86b005ea3cb866d7cf
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/configuration_llama.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/configuration_llama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da0c3c9735806a8b70494179e680e68263735f88
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/configuration_llama.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/convert_llama_weights_to_hf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/convert_llama_weights_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36f5e8600d184094ef42e287eebb0e058bb93991
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/convert_llama_weights_to_hf.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_flax_llama.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_flax_llama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c7c71ac1eee6236c7ee3f0bf1d9c1bb4d6488855
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_flax_llama.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_llama.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_llama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88b23eaedd163eaadb6aa573c6d80ea60b5f7c10
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/modeling_llama.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5df3f8e7b905c6d57d40d3dec99af372e0dc5bf1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama_fast.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b5d654deca4c0c5aa332bfe6128ae32a06272d7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/__pycache__/tokenization_llama_fast.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/configuration_llama.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/configuration_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..b62a1053094b91e405ab7fa46768ec56aa2a51ed
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/configuration_llama.py
@@ -0,0 +1,191 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" LLaMA model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class LlamaConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the LLaMA-7B.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32000):
+ Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`LlamaModel`].
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 11008):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer decoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ num_key_value_heads (`int`, *optional*):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+ `num_key_value_heads=1`, the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by meanpooling all the original heads within that group. For more details, check out [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+ `num_attention_heads`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
+ Llama 2 up to 4096, CodeLlama up to 16384.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ pad_token_id (`int`, *optional*):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 1):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+ pretraining_tp (`int`, *optional*, defaults to 1):
+ Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+ document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to understand more about it. This value is
+ necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+ issue](https://github.com/pytorch/pytorch/issues/76232).
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether to tie weight embeddings.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+ these scaling strategies behave:
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+ experimental feature, subject to breaking API changes in future versions.
+ attention_bias (`bool`, *optional*, defaults to `False`):
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+
+ ```python
+ >>> from transformers import LlamaModel, LlamaConfig
+
+ >>> # Initializing a LLaMA llama-7b style configuration
+ >>> configuration = LlamaConfig()
+
+ >>> # Initializing a model from the llama-7b style configuration
+ >>> model = LlamaModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
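+
+ >>> # a configuration with linear RoPE scaling enabled (`factor` must be a float greater than 1)
+ >>> configuration = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})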
+ ```"""
+
+ model_type = "llama"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ hidden_size=4096,
+ intermediate_size=11008,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ num_key_value_heads=None,
+ hidden_act="silu",
+ max_position_embeddings=2048,
+ initializer_range=0.02,
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ pad_token_id=None,
+ bos_token_id=1,
+ eos_token_id=2,
+ pretraining_tp=1,
+ tie_word_embeddings=False,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ attention_bias=False,
+ attention_dropout=0.0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.pretraining_tp = pretraining_tp
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self._rope_scaling_validation()
+ self.attention_bias = attention_bias
+ self.attention_dropout = attention_dropout
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+ f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
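+
+
+# Minimal usage sketch of `rope_scaling` (assuming the linear strategy); anything other than a
+# two-field dict with `type` in {"linear", "dynamic"} and a float `factor` > 1.0 would trip the
+# validation above:
+#
+#     from transformers import LlamaConfig
+#
+#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
+#     LlamaConfig(rope_scaling={"type": "ntk", "factor": 2.0})  # raises ValueError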
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9bca1204a22ec3b2820cb69235fc1e32cac7040
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/convert_llama_weights_to_hf.py
@@ -0,0 +1,339 @@
+# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import gc
+import json
+import os
+import shutil
+import warnings
+
+import torch
+
+from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
+
+
+try:
+ from transformers import LlamaTokenizerFast
+except ImportError as e:
+ warnings.warn(e)
+ warnings.warn(
+ "The converted tokenizer will be the `slow` tokenizer. To use the `fast` tokenizer, update your `tokenizers` library and re-run the tokenizer conversion."
+ )
+ LlamaTokenizerFast = None
+
+"""
+Sample usage:
+
+```
+python src/transformers/models/llama/convert_llama_weights_to_hf.py \
+ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
+```
+
+Thereafter, models can be loaded via:
+
+```py
+from transformers import LlamaForCausalLM, LlamaTokenizer
+
+model = LlamaForCausalLM.from_pretrained("/output/path")
+tokenizer = LlamaTokenizer.from_pretrained("/output/path")
+```
+
+Important note: you need to be able to host the whole model in RAM to execute this script (even though the biggest
+versions come in several checkpoints, each checkpoint contains a part of every weight of the model, so all of them need
+to be loaded in RAM).
+"""
+
+NUM_SHARDS = {
+ "7B": 1,
+ "7Bf": 1,
+ "13B": 2,
+ "13Bf": 2,
+ "34B": 4,
+ "30B": 4,
+ "65B": 8,
+ "70B": 8,
+ "70Bf": 8,
+}
+
+
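+# For reference, a worked example of the sizing math below with the Llama-7B values
+# (n=4096, ffn_dim_multiplier=1, multiple_of=256):
+#   int(8 * 4096 / 3) = 10922
+#   256 * ((10922 + 256 - 1) // 256) = 256 * 43 = 11008
+# which matches the default `intermediate_size` in `LlamaConfig`.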
+def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
+ return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
+
+
+def read_json(path):
+ with open(path, "r") as f:
+ return json.load(f)
+
+
+def write_json(text, path):
+ with open(path, "w") as f:
+ json.dump(text, f)
+
+
+def write_model(
+ model_path, input_base_path, model_size, tokenizer_path=None, safe_serialization=True, llama_version=1
+):
+ # for backward compatibility: previously, the repo needed to be called `my_repo/model_size`
+ if not os.path.isfile(os.path.join(input_base_path, "params.json")):
+ input_base_path = os.path.join(input_base_path, model_size)
+
+ os.makedirs(model_path, exist_ok=True)
+ tmp_model_path = os.path.join(model_path, "tmp")
+ os.makedirs(tmp_model_path, exist_ok=True)
+
+ params = read_json(os.path.join(input_base_path, "params.json"))
+ num_shards = NUM_SHARDS[model_size]
+ params = params.get("model", params)
+ n_layers = params["n_layers"]
+ n_heads = params["n_heads"]
+ n_heads_per_shard = n_heads // num_shards
+ dim = params["dim"]
+ dims_per_head = dim // n_heads
+ base = params.get("rope_theta", 10000.0)
+ inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
+ if base > 10000.0:
+ max_position_embeddings = 16384
+ else:
+ # Depending on the Llama version, the default max_position_embeddings has different values.
+ if llama_version == 1:
+ max_position_embeddings = 2048
+ elif llama_version == 2:
+ max_position_embeddings = 4096
+ else:
+ raise NotImplementedError(
+ f"Version {llama_version} of llama is not supported yet. "
+ "Current supported versions of llama are [1, 2]."
+ )
+
+ tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
+ if tokenizer_path is not None:
+ tokenizer = tokenizer_class(tokenizer_path)
+ tokenizer.save_pretrained(model_path)
+ vocab_size = tokenizer.vocab_size if tokenizer_path is not None else 32000
+
+ if params.get("n_kv_heads", None) is not None:
+ num_key_value_heads = params["n_kv_heads"] # for GQA / MQA
+ num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
+ key_value_dim = dim // num_key_value_heads
+ else: # compatibility with other checkpoints
+ num_key_value_heads = n_heads
+ num_local_key_value_heads = n_heads_per_shard
+ key_value_dim = dim
+
+ # permute for sliced rotary
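+ # The original checkpoint stores wq/wk with the rotary dimensions interleaved (even/odd pairs),
+ # while the HF implementation splits each head into two contiguous halves (`rotate_half`-style
+ # RoPE); permuting the rows of wq/wk here makes the two formulations produce the same outputs.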
+ def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
+ return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
+
+ print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
+ # Load weights
+ if num_shards == 1:
+ # Not sharded
+ # (The sharded implementation would also work, but this is simpler.)
+ loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
+ else:
+ # Sharded
+ loaded = [
+ torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
+ for i in range(num_shards)
+ ]
+ param_count = 0
+ index_dict = {"weight_map": {}}
+ for layer_i in range(n_layers):
+ filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
+ if num_shards == 1:
+ # Unsharded
+ state_dict = {
+ f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
+ loaded[f"layers.{layer_i}.attention.wq.weight"]
+ ),
+ f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
+ loaded[f"layers.{layer_i}.attention.wk.weight"]
+ ),
+ f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
+ f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
+ f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
+ f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
+ f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
+ f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
+ f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
+ }
+ else:
+ # Sharded
+ # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
+ # the same storage object; saving attention_norm and ffn_norm would save those other weights too, which is
+ # redundant since they will be stitched together from multiple shards. To avoid that, they are cloned.
+
+ state_dict = {
+ f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
+ f"layers.{layer_i}.attention_norm.weight"
+ ].clone(),
+ f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
+ f"layers.{layer_i}.ffn_norm.weight"
+ ].clone(),
+ }
+ state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
+ torch.cat(
+ [
+ loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
+ for i in range(num_shards)
+ ],
+ dim=0,
+ ).reshape(dim, dim)
+ )
+ state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
+ torch.cat(
+ [
+ loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
+ num_local_key_value_heads, dims_per_head, dim
+ )
+ for i in range(num_shards)
+ ],
+ dim=0,
+ ).reshape(key_value_dim, dim),
+ num_key_value_heads,
+ key_value_dim,
+ dim,
+ )
+ state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
+ [
+ loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
+ num_local_key_value_heads, dims_per_head, dim
+ )
+ for i in range(num_shards)
+ ],
+ dim=0,
+ ).reshape(key_value_dim, dim)
+
+ state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
+ [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
+ )
+ state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
+ [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
+ )
+ state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
+ [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
+ )
+ state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
+ [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
+ )
+
+ state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
+ for k, v in state_dict.items():
+ index_dict["weight_map"][k] = filename
+ param_count += v.numel()
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
+
+ filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
+ if num_shards == 1:
+ # Unsharded
+ state_dict = {
+ "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
+ "model.norm.weight": loaded["norm.weight"],
+ "lm_head.weight": loaded["output.weight"],
+ }
+ else:
+ state_dict = {
+ "model.norm.weight": loaded[0]["norm.weight"],
+ "model.embed_tokens.weight": torch.cat(
+ [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
+ ),
+ "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
+ }
+
+ for k, v in state_dict.items():
+ index_dict["weight_map"][k] = filename
+ param_count += v.numel()
+ torch.save(state_dict, os.path.join(tmp_model_path, filename))
+
+ # Write configs
+ index_dict["metadata"] = {"total_size": param_count * 2}
+ write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
+ ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
+ multiple_of = params["multiple_of"] if "multiple_of" in params else 256
+ config = LlamaConfig(
+ hidden_size=dim,
+ intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
+ num_attention_heads=params["n_heads"],
+ num_hidden_layers=params["n_layers"],
+ rms_norm_eps=params["norm_eps"],
+ num_key_value_heads=num_key_value_heads,
+ vocab_size=vocab_size,
+ rope_theta=base,
+ max_position_embeddings=max_position_embeddings,
+ )
+ config.save_pretrained(tmp_model_path)
+
+ # Make space so we can load the model properly now.
+ del state_dict
+ del loaded
+ gc.collect()
+
+ print("Loading the checkpoint in a Llama model.")
+ model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True)
+ # Avoid saving this as part of the config.
+ del model.config._name_or_path
+ model.config.torch_dtype = torch.float16
+ print("Saving in the Transformers format.")
+ model.save_pretrained(model_path, safe_serialization=safe_serialization)
+ shutil.rmtree(tmp_model_path)
+
+
+def write_tokenizer(tokenizer_path, input_tokenizer_path):
+ # Initialize the tokenizer based on the `spm` model
+ tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
+ print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
+ tokenizer = tokenizer_class(input_tokenizer_path)
+ tokenizer.save_pretrained(tokenizer_path)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--input_dir",
+ help="Location of LLaMA weights, which contains tokenizer.model and model folders",
+ )
+ parser.add_argument(
+ "--model_size",
+ choices=["7B", "7Bf", "13B", "13Bf", "30B", "34B", "65B", "70B", "70Bf", "tokenizer_only"],
+ help="'f' models correspond to the finetuned versions, and are specific to the Llama2 official release. For more details on Llama2, check out the original repo: https://huggingface.co/meta-llama",
+ )
+ parser.add_argument(
+ "--output_dir",
+ help="Location to write HF model and tokenizer",
+ )
+ parser.add_argument("--safe_serialization", action="store_true", help="Whether or not to save using `safetensors`.")
+ # Different Llama versions used different default values for max_position_embeddings, hence the need to be able to specify which version is being used.
+ parser.add_argument(
+ "--llama_version",
+ choices=[1, 2],
+ default=1,
+ type=int,
+ help="Version of the Llama model to convert. Currently supports Llama1 and Llama2. Controls the context size",
+ )
+ args = parser.parse_args()
+ spm_path = os.path.join(args.input_dir, "tokenizer.model")
+ if args.model_size != "tokenizer_only":
+ write_model(
+ model_path=args.output_dir,
+ input_base_path=args.input_dir,
+ model_size=args.model_size,
+ safe_serialization=args.safe_serialization,
+ tokenizer_path=spm_path,
+ llama_version=args.llama_version,
+ )
+ else:
+ write_tokenizer(args.output_dir, spm_path)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/modeling_flax_llama.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/modeling_flax_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..73fb1cbb95504474e30810178cf8a90fd5d7a452
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/modeling_flax_llama.py
@@ -0,0 +1,738 @@
+# coding=utf-8
+# Copyright 2023 Meta AI, EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Flax LLaMA model."""
+from functools import partial
+from typing import Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+import numpy as np
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+
+from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
+from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_llama import LlamaConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "LlamaConfig"
+_CHECKPOINT_FOR_DOC = "afmck/testing-llama-tiny"
+_REAL_CHECKPOINT_FOR_DOC = "openlm-research/open_llama_3b_v2"
+
+LLAMA_START_DOCSTRING = r"""
+
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.).
+
+ This model is also a Flax Linen
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`LlamaConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16`, or
+ `jax.numpy.bfloat16`.
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified, all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
+LLAMA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def create_sinusoidal_positions(num_pos, dim):
+ inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
+ freqs = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
+
+ emb = np.concatenate((freqs, freqs), axis=-1)
+ out = np.concatenate((np.sin(emb)[:, None, :], np.cos(emb)[:, None, :]), axis=-1)
+ return jnp.array(out[:, :, :num_pos])
+
+
+def rotate_half(tensor):
+ """Rotates half the hidden dims of the input."""
+ rotate_half_tensor = jnp.concatenate(
+ (-tensor[..., tensor.shape[-1] // 2 :], tensor[..., : tensor.shape[-1] // 2]), axis=-1
+ )
+ return rotate_half_tensor
+
+
+def apply_rotary_pos_emb(tensor, sin_pos, cos_pos):
+ return (tensor * cos_pos) + (rotate_half(tensor) * sin_pos)
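+
+# Quick sanity check of the rotation above (a standalone sketch; 0.5 is an arbitrary angle):
+#
+#     x = jnp.array([1.0, 0.0])                      # head_dim = 2 -> one (x[0], x[1]) pair
+#     out = apply_rotary_pos_emb(x, jnp.sin(0.5), jnp.cos(0.5))
+#     # out == [cos(0.5), sin(0.5)], i.e. a plain 2-D rotation of (1, 0) by 0.5 radians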
+
+
+class FlaxLlamaRMSNorm(nn.Module):
+ config: LlamaConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.epsilon = self.config.rms_norm_eps
+ self.weight = self.param("weight", lambda _, shape: jnp.ones(shape), self.config.hidden_size)
+
+ def __call__(self, hidden_states):
+ variance = jnp.asarray(hidden_states, dtype=jnp.float32)
+ variance = jnp.power(variance, 2)
+ variance = variance.mean(-1, keepdims=True)
+ # use `jax.numpy.sqrt` as `jax.lax.rsqrt` does not match `torch.rsqrt`
+ hidden_states = hidden_states / jnp.sqrt(variance + self.epsilon)
+
+ return self.weight * jnp.asarray(hidden_states, dtype=self.dtype)
+
+
+class FlaxLlamaRotaryEmbedding(nn.Module):
+ config: LlamaConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ head_dim = self.config.hidden_size // self.config.num_attention_heads
+ self.sincos = create_sinusoidal_positions(self.config.max_position_embeddings, head_dim)
+
+ def __call__(self, key, query, position_ids):
+ sincos = self.sincos[position_ids]
+ sin_pos, cos_pos = jnp.split(sincos, 2, axis=-1)
+
+ key = apply_rotary_pos_emb(key, sin_pos, cos_pos)
+ query = apply_rotary_pos_emb(query, sin_pos, cos_pos)
+
+ key = jnp.asarray(key, dtype=self.dtype)
+ query = jnp.asarray(query, dtype=self.dtype)
+
+ return key, query
+
+
+class FlaxLlamaAttention(nn.Module):
+ config: LlamaConfig
+ dtype: jnp.dtype = jnp.float32
+ causal: bool = True
+ is_cross_attention: bool = False
+
+ def setup(self):
+ config = self.config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ self.attention_softmax_in_fp32 = self.dtype is not jnp.float32
+
+ dense = partial(
+ nn.Dense,
+ self.embed_dim,
+ use_bias=config.attention_bias,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
+ )
+
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
+ self.o_proj = dense()
+
+ self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
+ self.rotary_emb = FlaxLlamaRotaryEmbedding(config, dtype=self.dtype)
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
+
+ @nn.compact
+ # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoSelfAttention._concatenate_to_cache
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+ states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to
+ # those key positions that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ ):
+ query = self.q_proj(hidden_states)
+ key = self.k_proj(hidden_states)
+ value = self.v_proj(hidden_states)
+
+ query = self._split_heads(query)
+ key = self._split_heads(key)
+ value = self._split_heads(value)
+
+ key, query = self.rotary_emb(key, query, position_ids)
+
+ query_length, key_length = query.shape[1], key.shape[1]
+
+ if self.has_variable("cache", "cached_key"):
+ mask_shift = self.variables["cache"]["cache_index"]
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_mask = lax.dynamic_slice(
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
+ )
+ else:
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
+
+ batch_size = hidden_states.shape[0]
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
+
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
+ attention_mask = combine_masks(attention_mask, causal_mask)
+
+ dropout_rng = None
+ if not deterministic and self.config.attention_dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.has_variable("cache", "cached_key") or init_cache:
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
+
+ # transform boolean mask into float mask
+ attention_bias = lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
+ )
+
+ # usual dot product attention
+ attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype
+ attn_weights = dot_product_attention_weights(
+ query,
+ key,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.config.attention_dropout,
+ deterministic=deterministic,
+ dtype=attention_dtype,
+ )
+
+ if self.attention_softmax_in_fp32:
+ attn_weights = attn_weights.astype(self.dtype)
+
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = self.o_proj(attn_output)
+
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
+ return outputs
+
+
+class FlaxLlamaMLP(nn.Module):
+ config: LlamaConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ embed_dim = self.config.hidden_size
+ inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * embed_dim
+
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
+ self.act = ACT2FN[self.config.hidden_act]
+
+ self.gate_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
+ self.down_proj = nn.Dense(embed_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
+ self.up_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init)
+
+ def __call__(self, hidden_states):
+ up_proj_states = self.up_proj(hidden_states)
+ gate_states = self.act(self.gate_proj(hidden_states))
+
+ hidden_states = self.down_proj(up_proj_states * gate_states)
+ return hidden_states
+
+
+class FlaxLlamaDecoderLayer(nn.Module):
+ config: LlamaConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.input_layernorm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype)
+ self.self_attn = FlaxLlamaAttention(self.config, dtype=self.dtype)
+ self.post_attention_layernorm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype)
+ self.mlp = FlaxLlamaMLP(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_ids=None,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ ):
+ residual = hidden_states
+ hidden_states = self.input_layernorm(hidden_states)
+ outputs = self.self_attn(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ )
+ # residual connection
+ attn_output = outputs[0]
+ hidden_states = residual + attn_output
+
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ # residual connection
+ hidden_states = residual + hidden_states
+
+ return (hidden_states,) + outputs[1:]
+
+
+# Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoPreTrainedModel with GPTNeo->Llama, GPT_NEO->LLAMA, transformer->model
+class FlaxLlamaPreTrainedModel(FlaxPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = LlamaConfig
+ base_model_prefix = "model"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: LlamaConfig,
+ input_shape: Tuple = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ attention_mask = jnp.ones_like(input_ids)
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ def init_cache(self, batch_size, max_length):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+ """
+ # init input variables to retrieve cache
+ input_ids = jnp.ones((batch_size, max_length))
+ attention_mask = jnp.ones_like(input_ids)
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
+ )
+ return unfreeze(init_variables["cache"])
+
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ position_ids=None,
+ params: dict = None,
+ past_key_values: dict = None,
+ dropout_rng: jax.random.PRNGKey = None,
+ train: bool = False,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ batch_size, sequence_length = input_ids.shape
+
+ if position_ids is None:
+ if past_key_values is not None:
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
+
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ if attention_mask is None:
+ attention_mask = jnp.ones((batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If past_key_values are passed, the cache is already initialized, and a private flag `init_cache`
+ # has to be passed down to ensure the cache is used. The cache also has to be marked as mutable so
+ # that it can be updated by the FlaxLlamaAttention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ outputs = self.module.apply(
+ inputs,
+ jnp.array(input_ids, dtype="i4"),
+ jnp.array(attention_mask, dtype="i4"),
+ jnp.array(position_ids, dtype="i4"),
+ not train,
+ False,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ rngs=rngs,
+ mutable=mutable,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past_key_values = outputs
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past_key_values = outputs
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
+
+ return outputs
+
+
+class FlaxLlamaLayerCollection(nn.Module):
+ config: LlamaConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.blocks = [
+ FlaxLlamaDecoderLayer(self.config, dtype=self.dtype, name=str(i))
+ for i in range(self.config.num_hidden_layers)
+ ]
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask=None,
+ position_ids=None,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = False,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ for block in self.blocks:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ layer_outputs = block(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions += (layer_outputs[1],)
+
+ # this contains possible `None` values - `FlaxLlamaModule` will filter them out
+ outputs = (hidden_states, all_hidden_states, all_attentions)
+
+ return outputs
+
+
+class FlaxLlamaModule(nn.Module):
+ config: LlamaConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.hidden_size = self.config.hidden_size
+ embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
+ self.embed_tokens = nn.Embed(
+ self.config.vocab_size,
+ self.hidden_size,
+ embedding_init=embedding_init,
+ dtype=self.dtype,
+ )
+ self.layers = FlaxLlamaLayerCollection(self.config, dtype=self.dtype)
+ self.norm = FlaxLlamaRMSNorm(self.config, dtype=self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ position_ids=None,
+ deterministic=True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ input_embeds = self.embed_tokens(input_ids.astype("i4"))
+
+ outputs = self.layers(
+ input_embeds,
+ position_ids=position_ids,
+ attention_mask=attention_mask,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ hidden_states = self.norm(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = outputs[1] + (hidden_states,)
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
+ else:
+ outputs = (hidden_states,) + outputs[1:]
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=outputs[1],
+ attentions=outputs[-1],
+ )
+
+
+@add_start_docstrings(
+ "The bare Llama Model transformer outputting raw hidden-states without any specific head on top.",
+ LLAMA_START_DOCSTRING,
+)
+class FlaxLlamaModel(FlaxLlamaPreTrainedModel):
+ module_class = FlaxLlamaModule
+
+
+append_call_sample_docstring(
+ FlaxLlamaModel,
+ _CHECKPOINT_FOR_DOC,
+ FlaxBaseModelOutput,
+ _CONFIG_FOR_DOC,
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+)
+
+
+class FlaxLlamaForCausalLMModule(nn.Module):
+ config: LlamaConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.model = FlaxLlamaModule(self.config, dtype=self.dtype)
+ self.lm_head = nn.Dense(
+ self.config.vocab_size,
+ use_bias=False,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask=None,
+ position_ids=None,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ outputs = self.model(
+ input_ids,
+ position_ids=position_ids,
+ attention_mask=attention_mask,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ lm_logits = self.lm_head(hidden_states)
+
+ if not return_dict:
+ return (lm_logits,) + outputs[1:]
+
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
+
+
+@add_start_docstrings(
+ """
+ The Llama Model transformer with a language modeling head (linear layer) on top.
+ """,
+ LLAMA_START_DOCSTRING,
+)
+# Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJForCausalLM with GPTJ->Llama
+class FlaxLlamaForCausalLM(FlaxLlamaPreTrainedModel):
+ module_class = FlaxLlamaForCausalLMModule
+
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
+ # initializing the cache
+ batch_size, seq_length = input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length)
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+ # But since Llama uses a causal mask, those positions are masked anyway.
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation.
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if attention_mask is not None:
+ position_ids = attention_mask.cumsum(axis=-1) - 1
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
+ else:
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+ return {
+ "past_key_values": past_key_values,
+ "attention_mask": extended_attention_mask,
+ "position_ids": position_ids,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
+ return model_kwargs
+
+
+append_call_sample_docstring(
+ FlaxLlamaForCausalLM,
+ _CHECKPOINT_FOR_DOC,
+ FlaxCausalLMOutput,
+ _CONFIG_FOR_DOC,
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
+)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..2574a45cab4894e01a6aa6a113cbcbc3dd3e1ec6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py
@@ -0,0 +1,1554 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch LLaMA model."""
+
+import math
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache, StaticCache
+from ...modeling_attn_mask_utils import AttentionMaskConverter
+from ...modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutputWithPast,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import ALL_LAYERNORM_LAYERS
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_llama import LlamaConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "LlamaConfig"
+
+
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
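+
+# For example, with attention_mask = [[1, 1, 1, 0], [1, 1, 0, 0]] this returns
+# indices = [0, 1, 2, 4, 5] (positions of the real tokens in the flattened mask),
+# cu_seqlens = [0, 3, 5] (cumulative sequence lengths) and max_seqlen_in_batch = 3,
+# which is the packed layout expected by the flash-attn varlen kernels.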
+
+
+class LlamaRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ LlamaRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
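+ # RMSNorm: y = weight * x / sqrt(mean(x**2) + eps); the statistics are computed in float32
+ # and the result is cast back to the input dtype.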
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
+
+
+ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)
+
+
+class LlamaRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
+ super().__init__()
+ self.scaling_factor = scaling_factor
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ # For BC we register cos and sin cached
+ self.max_seq_len_cached = max_position_embeddings
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+ t = t / self.scaling_factor
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from the paper: this uses another permutation of the dimensions, but yields the same calculation.
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("_cos_cached", emb.cos().to(torch.get_default_dtype()), persistent=False)
+ self.register_buffer("_sin_cached", emb.sin().to(torch.get_default_dtype()), persistent=False)
+
+ @property
+ def sin_cached(self):
+ logger.warning_once(
+ "The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use "
+ "the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class"
+ )
+ return self._sin_cached
+
+ @property
+ def cos_cached(self):
+ logger.warning_once(
+ "The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use "
+ "the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class"
+ )
+ return self._cos_cached
+
+ @torch.no_grad()
+ def forward(self, x, position_ids):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+ position_ids_expanded = position_ids[:, None, :].float()
+ # Force float32 since bfloat16 loses precision on long contexts
+ # See https://github.com/huggingface/transformers/pull/29285
+ device_type = x.device.type
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+ with torch.autocast(device_type=device_type, enabled=False):
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos()
+ sin = emb.sin()
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
+ """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+ def forward(self, x, position_ids):
+ # difference to the original RoPE: a scaling factor is applied to the position ids
+ position_ids = position_ids.float() / self.scaling_factor
+ cos, sin = super().forward(x, position_ids)
+ return cos, sin
+
+
+class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
+ """LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+ def forward(self, x, position_ids):
+ # difference to the original RoPE: inv_freq is recomputed when the sequence length > original length
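+ # (NTK-aware scaling: the RoPE base grows with the sequence length, which stretches the lowest
+ # frequencies to cover the longer context while leaving the highest frequencies mostly unchanged.)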
+ seq_len = torch.max(position_ids) + 1
+ if seq_len > self.max_position_embeddings:
+ base = self.base * (
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
+ ) ** (self.dim / (self.dim - 2))
+ inv_freq = 1.0 / (
+ base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim)
+ )
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: this may break with compilation
+
+ cos, sin = super().forward(x, position_ids)
+ return cos, sin
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
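+
+# Rough usage sketch (shapes only), mirroring how `LlamaAttention.forward` calls this below:
+# `LlamaRotaryEmbedding.forward` returns `cos`/`sin` of shape [batch_size, seq_len, head_dim], and
+# with the default `unsqueeze_dim=1` they broadcast against q/k of shape
+# [batch_size, num_heads, seq_len, head_dim], leaving the shapes unchanged:
+#
+#     cos, sin = self.rotary_emb(value_states, position_ids)
+#     query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)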
+
+
+class LlamaMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, x):
+ if self.config.pretraining_tp > 1:
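+ # Reproduce the column-parallel (gate/up) and row-parallel (down) linear layers used during
+ # pretraining: slice the weights into `pretraining_tp` chunks, apply them separately, and
+ # concatenate/sum the partial results so the numerics match the tensor-parallel run exactly.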
+ slice = self.intermediate_size // self.config.pretraining_tp
+ gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
+ up_proj_slices = self.up_proj.weight.split(slice, dim=0)
+ down_proj_slices = self.down_proj.weight.split(slice, dim=1)
+
+ gate_proj = torch.cat(
+ [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
+ )
+ up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
+
+ intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
+ down_proj = [
+ F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
+ ]
+ down_proj = sum(down_proj)
+ else:
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+ return down_proj
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
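+
+# For example, with 32 attention heads and 8 key/value heads (n_rep = 4), a key/value tensor of
+# shape (batch, 8, seq_len, head_dim) becomes (batch, 32, seq_len, head_dim), so that each
+# key/value head is shared by 4 query heads (grouped-query attention).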
+
+
+class LlamaAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.attention_dropout = config.attention_dropout
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+ self.is_causal = True
+
+ if (self.head_dim * self.num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
+ self._init_rope()
+
+ def _init_rope(self):
+ if self.config.rope_scaling is None:
+ self.rotary_emb = LlamaRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+ else:
+ scaling_type = self.config.rope_scaling["type"]
+ scaling_factor = self.config.rope_scaling["factor"]
+ if scaling_type == "linear":
+ self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ elif scaling_type == "dynamic":
+ self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ scaling_factor=scaling_factor,
+ base=self.rope_theta,
+ )
+ else:
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ if self.config.pretraining_tp > 1:
+ key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
+ query_slices = self.q_proj.weight.split(
+ (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
+ )
+ key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
+ value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
+
+ query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
+ query_states = torch.cat(query_states, dim=-1)
+
+ key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
+ key_states = torch.cat(key_states, dim=-1)
+
+ value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
+ value_states = torch.cat(value_states, dim=-1)
+
+ else:
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ past_key_value = getattr(self, "past_key_value", past_key_value)
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
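+ # After `repeat_kv`, keys and values have as many heads as the queries: a tensor of shape
+ # (bsz, num_key_value_heads, kv_len, head_dim) becomes (bsz, num_heads, kv_len, head_dim), so the
+ # standard softmax(QK^T / sqrt(head_dim)) V computation below applies unchanged.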
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attention_mask is not None: # no matter the length, we just slice it
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ if self.config.pretraining_tp > 1:
+ attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
+ o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
+ attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
+ else:
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+class LlamaFlashAttention2(LlamaAttention):
+ """
+ Llama flash attention module. This module inherits from `LlamaAttention`, as the weights of the module stay
+ untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
+ flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ output_attentions = False
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ # Flash attention requires the input to have the shape
+ # batch_size x seq_length x num_heads x head_dim. We first move to
+ # (batch_size, num_heads, seq_length, head_dim) to apply RoPE and update the cache, then transpose back below.
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ past_key_value = getattr(self, "past_key_value", past_key_value)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+ # to be able to avoid many of these transpose/reshape/view.
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ dropout_rate = self.attention_dropout if self.training else 0.0
+
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
+ # cast them back in the correct dtype just to be sure everything works as expected.
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
+ # in fp32. (LlamaRMSNorm handles it correctly)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = self._flash_attention_forward(
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+ it first unpads the input, then computes the attention scores, and finally pads the attention output back.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
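+ # `cu_seqlens_k` is the cumulative-sequence-length format expected by flash-attn's varlen kernels:
+ # e.g. per-row lengths [3, 5] yield cu_seqlens [0, 3, 8]. `indices_k` selects the non-padded
+ # positions from the flattened (batch_size * kv_seq_len) axis.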
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+class LlamaSdpaAttention(LlamaAttention):
+ """
+ Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+ `LlamaAttention`, as the weights of the module stay untouched. The only changes are on the forward pass, to adapt to
+ the SDPA API.
+ """
+
+ # Adapted from LlamaAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ cos, sin = self.rotary_emb(value_states, position_ids)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+ # In case static cache is used, it is an instance attribute.
+ past_key_value = getattr(self, "past_key_value", past_key_value)
+
+ if past_key_value is not None:
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ causal_mask = attention_mask
+ # if attention_mask is not None and cache_position is not None:
+ if attention_mask is not None:
+ causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and causal_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=causal_mask,
+ dropout_p=self.attention_dropout if self.training else 0.0,
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+
+LLAMA_ATTENTION_CLASSES = {
+ "eager": LlamaAttention,
+ "flash_attention_2": LlamaFlashAttention2,
+ "sdpa": LlamaSdpaAttention,
+}
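+# The concrete class is picked through `config._attn_implementation`. As a rough sketch of typical
+# usage (checkpoint name reused from the example later in this file, not something this mapping depends on):
+#     model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", attn_implementation="sdpa")
+# The "flash_attention_2" entry additionally requires the `flash-attn` package to be installed.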
+
+
+class LlamaDecoderLayer(nn.Module):
+ def __init__(self, config: LlamaConfig, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
+
+ self.mlp = LlamaMLP(config)
+ self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*):
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
+ query_sequence_length, key_sequence_length)` if default attention is used.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
+ )
+
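+ # Pre-norm residual layout: hidden_states = x + Attn(RMSNorm(x)), followed by
+ # hidden_states = x + MLP(RMSNorm(x)), as implemented below.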
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+LLAMA_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`LlamaConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
+ LLAMA_START_DOCSTRING,
+)
+class LlamaPreTrainedModel(PreTrainedModel):
+ config_class = LlamaConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["LlamaDecoderLayer"]
+ _skip_keys_device_placement = ["past_key_values"]
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_cache_class = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None):
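+ # Attaches a per-layer cache instance (e.g. `StaticCache`) directly to every attention module.
+ # Caller sketch with hypothetical sizes: `model._setup_cache(StaticCache, max_batch_size=1, max_cache_len=2048)`.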
+ if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
+ raise ValueError(
+ "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
+ "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
+ )
+
+ for layer in self.model.layers:
+ device = layer.input_layernorm.weight.device
+ if hasattr(self.config, "_pre_quantization_dtype"):
+ dtype = self.config._pre_quantization_dtype
+ else:
+ dtype = layer.self_attn.o_proj.weight.dtype
+ layer.self_attn.past_key_value = cache_cls(
+ self.config, max_batch_size, max_cache_len, device=device, dtype=dtype
+ )
+
+ def _reset_cache(self):
+ for layer in self.model.layers:
+ layer.self_attn.past_key_value = None
+
+
+LLAMA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance;
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
+ the complete sequence length.
+"""
+
+
+@add_start_docstrings(
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
+ LLAMA_START_DOCSTRING,
+)
+class LlamaModel(LlamaPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
+
+ Args:
+ config: LlamaConfig
+ """
+
+ def __init__(self, config: LlamaConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError(
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
+ )
+
+ if self.gradient_checkpointing and self.training and use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
+ )
+ use_cache = False
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ past_seen_tokens = 0
+ if use_cache: # kept for BC (cache positions)
+ if not isinstance(past_key_values, StaticCache):
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ past_seen_tokens = past_key_values.get_seq_length()
+
+ if cache_position is None:
+ if isinstance(past_key_values, StaticCache):
+ raise ValueError("cache_position is a required argument when using StaticCache.")
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ if position_ids is None:
+ position_ids = cache_position.unsqueeze(0)
+
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
+
+ # embed positions
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ causal_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ use_cache,
+ cache_position,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = (
+ next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
+ )
+ if not return_dict:
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
+ def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
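+ # The mask built below is additive: allowed positions hold 0.0 and disallowed positions hold the
+ # most negative value of `dtype`, so they vanish after softmax. For a sequence length of 3 with no
+ # padding, for example, only the strictly upper-triangular entries are filled with `min_dtype`.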
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and 0.0 in attention_mask:
+ return attention_mask
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ min_dtype = torch.finfo(dtype).min
+ sequence_length = input_tensor.shape[1]
+ if hasattr(getattr(self.layers[0], "self_attn", {}), "past_key_value"): # static cache
+ target_length = self.config.max_position_embeddings
+ else: # dynamic cache
+ target_length = (
+ attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else cache_position[-1] + 1
+ )
+
+ causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ if attention_mask.dim() == 2:
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
+ elif attention_mask.dim() == 4:
+ # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with
+ # cache. In that case, the 4D attention mask attends to the newest tokens only.
+ if attention_mask.shape[-2] < cache_position[0] + sequence_length:
+ offset = cache_position[0]
+ else:
+ offset = 0
+ mask_shape = attention_mask.shape
+ mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
+ causal_mask[
+ : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
+ ] = mask_slice
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ ):
+ # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).
+ is_tracing = (
+ torch.jit.is_tracing()
+ or isinstance(input_tensor, torch.fx.Proxy)
+ or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
+ )
+ if not is_tracing and torch.any(attention_mask != 1):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+ # Details: https://github.com/pytorch/pytorch/issues/110213
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+
+class LlamaForCausalLM(LlamaPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = LlamaModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, LlamaForCausalLM
+
+ >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
+ >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ )
+
+ hidden_states = outputs[0]
+ if self.config.pretraining_tp > 1:
+ lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
+ logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
+ logits = torch.cat(logits, dim=-1)
+ else:
+ logits = self.lm_head(hidden_states)
+ logits = logits.float()
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
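+ # Illustrative shift: for tokens [t0, t1, t2, t3], the logits at positions 0..2 are trained to
+ # predict t1..t3; the final position has no target.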
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs
+ ):
+ # With static cache, the `past_key_values` is None
+ # TODO joao: standardize interface for the different Cache classes and remove of this if
+ has_static_cache = False
+ if past_key_values is None:
+ past_key_values = getattr(getattr(self.model.layers[0], "self_attn", {}), "past_key_value", None)
+ has_static_cache = past_key_values is not None
+
+ past_length = 0
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length()
+ max_cache_length = (
+ torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
+ if past_key_values.get_max_length() is not None
+ else None
+ )
+ cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)
+ # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
+ # TODO: use `next_tokens` directly instead.
+ model_inputs = {"input_ids": input_ids.contiguous()}
+
+ input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
+ if cache_position is None:
+ cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
+ else:
+ cache_position = cache_position[-input_length:]
+
+ if has_static_cache:
+ past_key_values = None
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "cache_position": cache_position,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ """
+ The LLaMa Model transformer with a sequence classification head on top (linear layer).
+
+ [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
+ each row of the batch).
+ """,
+ LLAMA_START_DOCSTRING,
+)
+class LlamaForSequenceClassification(LlamaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = LlamaModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
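+ # Illustrative example: for a row [tok, tok, pad, pad] the first pad sits at index 2, so 2 - 1 = 1
+ # is the last real token; for a row with no padding the argmax is 0, and the modulo maps the
+ # resulting -1 back to the final position.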
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+The Llama Model transformer with a span classification head on top for extractive question-answering tasks like
+SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ LLAMA_START_DOCSTRING,
+)
+class LlamaForQuestionAnswering(LlamaPreTrainedModel):
+ base_model_prefix = "transformer"
+
+ # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Llama
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = LlamaModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.transformer.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.transformer.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, the split adds a dimension, so squeeze it away
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f8997274ce7588c2859c27282e053fe2336e99c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py
@@ -0,0 +1,482 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tokenization classes for LLaMA."""
+import os
+from shutil import copyfile
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...convert_slow_tokenizer import import_protobuf
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+if TYPE_CHECKING:
+ from ...tokenization_utils_base import TextInput
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer.model",
+ },
+ "tokenizer_file": {
+ "hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer_config.json",
+ },
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
+ "hf-internal-testing/llama-tokenizer": 2048,
+}
+SPIECE_UNDERLINE = "▁"
+
+B_INST, E_INST = "[INST]", "[/INST]"
+B_SYS, E_SYS = "<>\n", "\n<>\n\n"
+
+# fmt: off
+DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
+answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\
+ that your responses are socially unbiased and positive in nature.
+
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
+correct. If you don't know the answer to a question, please don't share false information."""
+# fmt: on
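+# Rough sketch (illustrative only, not used elsewhere in this file) of how these markers are
+# typically assembled into a single-turn Llama-2 chat prompt:
+#     f"{B_INST} {B_SYS}{system_prompt}{E_SYS}{user_message} {E_INST}"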
+
+
+class LlamaTokenizer(PreTrainedTokenizer):
+ """
+ Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
+ no padding token in the original model.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+ pad_token (`str` or `tokenizers.AddedToken`, *optional*):
+ A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
+ attention mechanisms or loss computation.
+ sp_model_kwargs (`Dict[str, Any]`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
+ using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ add_bos_token (`bool`, *optional*, defaults to `True`):
+ Whether or not to add a `bos_token` at the start of sequences.
+ add_eos_token (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an `eos_token` at the end of sequences.
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+ Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
+ extra spaces.
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
+ Whether or not the default system prompt for Llama should be used.
+ spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to add spaces between special tokens.
+ legacy (`bool`, *optional*):
+ Whether or not the `legacy` behavior of the tokenizer should be used. Legacy refers to the behavior before the merge of #24622
+ and #25224, which include fixes to properly handle tokens that appear after special tokens. A simple
+ example:
+
+ - `legacy=True`:
+ ```python
+ >>> from transformers import T5Tokenizer
+
+ >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=True)
+ >>> tokenizer.encode("Hello .")
+ [8774, 32099, 3, 5, 1]
+ ```
+ - `legacy=False`:
+ ```python
+ >>> from transformers import T5Tokenizer
+
+ >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=False)
+ >>> tokenizer.encode("Hello .") # the extra space `[3]` is no longer here
+ [8774, 32099, 5, 1]
+ ```
+ Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
+ add_prefix_space (`bool`, *optional*, defaults to `True`):
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
+ other word.
+
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ unk_token="",
+ bos_token="",
+ eos_token="",
+ pad_token=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ add_bos_token=True,
+ add_eos_token=False,
+ clean_up_tokenization_spaces=False,
+ use_default_system_prompt=False,
+ spaces_between_special_tokens=False,
+ legacy=None,
+ add_prefix_space=True,
+ **kwargs,
+ ):
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
+
+ if legacy is None:
+ logger.warning_once(
+ f"You are using the default legacy behaviour of the {self.__class__}. This is"
+ " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
+ " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
+ " means, and thoroughly read the reason why this was added as explained in"
+ " https://github.com/huggingface/transformers/pull/24565"
+ )
+ legacy = True
+
+ self.legacy = legacy
+ self.vocab_file = vocab_file
+ self.add_bos_token = add_bos_token
+ self.add_eos_token = add_eos_token
+ self.use_default_system_prompt = use_default_system_prompt
+ self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
+ self.add_prefix_space = add_prefix_space
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ add_bos_token=add_bos_token,
+ add_eos_token=add_eos_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ use_default_system_prompt=use_default_system_prompt,
+ spaces_between_special_tokens=spaces_between_special_tokens,
+ legacy=legacy,
+ add_prefix_space=add_prefix_space,
+ **kwargs,
+ )
+
+ @property
+ def unk_token_length(self):
+ return len(self.sp_model.encode(str(self.unk_token)))
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
+ def get_spm_processor(self, from_slow=False):
+ tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ if self.legacy or from_slow: # no dependency on protobuf
+ tokenizer.Load(self.vocab_file)
+ return tokenizer
+
+ with open(self.vocab_file, "rb") as f:
+ sp_model = f.read()
+ model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
+ model = model_pb2.ModelProto.FromString(sp_model)
+ normalizer_spec = model_pb2.NormalizerSpec()
+ normalizer_spec.add_dummy_prefix = False
+ model.normalizer_spec.MergeFrom(normalizer_spec)
+ sp_model = model.SerializeToString()
+ tokenizer.LoadFromSerializedProto(sp_model)
+ return tokenizer
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ @property
+ def vocab_size(self):
+ """Returns vocab size"""
+ return self.sp_model.get_piece_size()
+
+ def get_vocab(self):
+ """Returns vocab as a dict"""
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
+ def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
+ """
+ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
+ first token is special.
+ """
+ if self.legacy or len(text) == 0:
+ return super().tokenize(text, **kwargs)
+
+ text = text.replace(SPIECE_UNDERLINE, " ")
+ if self.add_prefix_space:
+ text = SPIECE_UNDERLINE + text
+
+ tokens = super().tokenize(text, **kwargs)
+
+ if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
+ tokens = tokens[1:]
+ return tokens
+
+ # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
+ def _tokenize(self, text, **kwargs):
+ """
+ Returns a tokenized string.
+
+ We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
+ SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
+ `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
+ `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
+ `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
+ """
+ tokens = self.sp_model.encode(text, out_type=str)
+ if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
+ return tokens
+
+ # 1. Encode string + prefix ex: " Hey"
+ tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
+ # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
+ return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ # since we manually add the prefix space, we have to remove it when decoding
+ if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
+ tokens[0] = tokens[0][1:]
+
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for i, token in enumerate(tokens):
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ if not prev_is_special and i != 0 and self.legacy:
+ out_string += " "
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string
+
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ """
+ Save the vocabulary and special tokens file to a directory.
+
+ Args:
+ save_directory (`str`):
+ The directory in which to save the vocabulary.
+
+ Returns:
+ `Tuple(str)`: Paths to the files saved.
+ """
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = bos_token_id + token_ids_0 + eos_token_id
+
+ if token_ids_1 is not None:
+ output = output + bos_token_id + token_ids_1 + eos_token_id
+
+ return output
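+
+ # A quick sketch of the method above for a LlamaTokenizer instance `tok`, assuming the usual
+ # Llama special-token ids (bos_token_id == 1, eos_token_id == 2) and the defaults
+ # add_bos_token=True, add_eos_token=False; the exact ids depend on the loaded vocabulary:
+ #
+ # >>> tok.build_inputs_with_special_tokens([10, 11])
+ # [1, 10, 11]
+ # >>> tok.build_inputs_with_special_tokens([10, 11], [12])
+ # [1, 10, 11, 1, 12]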
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ bos_token_id = [1] if self.add_bos_token else []
+ eos_token_id = [1] if self.add_eos_token else []
+
+ if token_ids_1 is None:
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+ return (
+ bos_token_id
+ + ([0] * len(token_ids_0))
+ + eos_token_id
+ + bos_token_id
+ + ([0] * len(token_ids_1))
+ + eos_token_id
+ )
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, only the first portion of the mask (0s) is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+ if token_ids_1 is not None:
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+ return output
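+
+ # Sketch of the mask above for a LlamaTokenizer instance `tok` with add_bos_token=True and
+ # add_eos_token=False: each segment contributes one bos id plus its token ids, so only the
+ # lengths of the inputs (not their values) matter.
+ #
+ # >>> tok.create_token_type_ids_from_sequences([10, 11], [12, 13])
+ # [0, 0, 0, 1, 1, 1]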
+
+ @property
+ def default_chat_template(self):
+ """
+ LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
+ Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict
+ user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
+ rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
+ results in an unusual token ordering when it is present. This template should definitely be changed if you wish
+ to fine-tune a model with more flexible role ordering!
+
+ The output should look something like:
+
+ <s>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer </s><s>[INST] Prompt [/INST] Answer </s>
+ <s>[INST] Prompt [/INST]
+
+ The reference for this chat template is [this code
+ snippet](https://github.com/facebookresearch/llama/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/llama/generation.py#L320-L362)
+ in the original repository.
+ """
+ logger.warning_once(
+ "\nNo chat template is defined for this tokenizer - using the default template "
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+ )
+ template = (
+ "{% if messages[0]['role'] == 'system' %}"
+ "{% set loop_messages = messages[1:] %}" # Extract system message if it's present
+ "{% set system_message = messages[0]['content'] %}"
+ "{% elif USE_DEFAULT_PROMPT == true and not '<>' in messages[0]['content'] %}"
+ "{% set loop_messages = messages %}" # Or use the default system message if the flag is set
+ "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
+ "{% else %}"
+ "{% set loop_messages = messages %}"
+ "{% set system_message = false %}"
+ "{% endif %}"
+ "{% for message in loop_messages %}" # Loop over all non-system messages
+ "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
+ "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
+ "{% endif %}"
+ "{% if loop.index0 == 0 and system_message != false %}" # Embed system message in first message
+ "{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}"
+ "{% else %}"
+ "{% set content = message['content'] %}"
+ "{% endif %}"
+ "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way
+ "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
+ "{% elif message['role'] == 'system' %}"
+ "{{ '<>\\n' + content.strip() + '\\n<>\\n\\n' }}"
+ "{% elif message['role'] == 'assistant' %}"
+ "{{ ' ' + content.strip() + ' ' + eos_token }}"
+ "{% endif %}"
+ "{% endfor %}"
+ )
+ template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
+ default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
+ template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
+
+ return template
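+
+ # A short sketch of how this template is typically consumed (the messages are illustrative, and
+ # the default template only applies when no `chat_template` is set on the tokenizer):
+ #
+ # >>> messages = [
+ # ... {"role": "system", "content": "You are a pirate."},
+ # ... {"role": "user", "content": "Hi!"},
+ # ... ]
+ # >>> tokenizer.apply_chat_template(messages, tokenize=False)
+ # '<s>[INST] <<SYS>>\nYou are a pirate.\n<</SYS>>\n\nHi! [/INST]'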
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..fee7711987058594c1b1d19ab4b5e6abe3b23981
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py
@@ -0,0 +1,290 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+from shutil import copyfile
+from typing import Optional, Tuple
+
+from tokenizers import processors
+
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import is_sentencepiece_available, logging
+from ...utils.versions import require_version
+
+
+require_version("tokenizers>=0.13.3")
+
+if is_sentencepiece_available():
+ from .tokenization_llama import LlamaTokenizer
+else:
+ LlamaTokenizer = None
+
+logger = logging.get_logger(__name__)
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {
+ "hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer.model",
+ },
+ "tokenizer_file": {
+ "hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer_config.json",
+ },
+}
+B_INST, E_INST = "[INST]", "[/INST]"
+B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+
+# fmt: off
+DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
+answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\
+ that your responses are socially unbiased and positive in nature.
+
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
+correct. If you don't know the answer to a question, please don't share false information."""
+# fmt: on
+
+
+class LlamaTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+ Notably, this uses ByteFallback and no normalization.
+
+ ```python
+ >>> from transformers import LlamaTokenizerFast
+
+ >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
+ >>> tokenizer.encode("Hello this is a test")
+ [1, 15043, 445, 338, 263, 1243]
+ ```
+
+ If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
+ call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
+ values of the first token and final token of an encoded sequence will not be correct). For more details, check out
+ the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
+
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`, *optional*):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ tokenizer_file (`str`, *optional*):
+ [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
+ contains everything needed to load the tokenizer.
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+ Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
+ extra spaces.
+ unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+ eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+ add_bos_token (`bool`, *optional*, defaults to `True`):
+ Whether or not to add a `bos_token` at the start of sequences.
+ add_eos_token (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an `eos_token` at the end of sequences.
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
+ Whether or not the default system prompt for Llama should be used.
+ add_prefix_space (`bool`, *optional*):
+ Whether or not the tokenizer should automatically add a prefix space.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ slow_tokenizer_class = LlamaTokenizer
+ padding_side = "left"
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ clean_up_tokenization_spaces=False,
+ unk_token="",
+ bos_token="",
+ eos_token="",
+ add_bos_token=True,
+ add_eos_token=False,
+ use_default_system_prompt=False,
+ add_prefix_space=None,
+ **kwargs,
+ ):
+ if add_prefix_space is not None:
+ logger.warning_once(
+ "You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers"
+ )
+ kwargs["from_slow"] = True
+
+ super().__init__(
+ vocab_file=vocab_file,
+ tokenizer_file=tokenizer_file,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ add_bos_token=add_bos_token,
+ add_eos_token=add_eos_token,
+ use_default_system_prompt=use_default_system_prompt,
+ **kwargs,
+ )
+ self._add_bos_token = add_bos_token
+ self._add_eos_token = add_eos_token
+ self.update_post_processor()
+ self.use_default_system_prompt = use_default_system_prompt
+ self.vocab_file = vocab_file
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ def update_post_processor(self):
+ """
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
+ """
+ bos = self.bos_token
+ bos_token_id = self.bos_token_id
+ if bos is None and self.add_bos_token:
+ raise ValueError("add_bos_token = True but bos_token = None")
+
+ eos = self.eos_token
+ eos_token_id = self.eos_token_id
+ if eos is None and self.add_eos_token:
+ raise ValueError("add_eos_token = True but eos_token = None")
+
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
+
+ special_tokens = []
+ if self.add_bos_token:
+ special_tokens.append((bos, bos_token_id))
+ if self.add_eos_token:
+ special_tokens.append((eos, eos_token_id))
+ self._tokenizer.post_processor = processors.TemplateProcessing(
+ single=single, pair=pair, special_tokens=special_tokens
+ )
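+
+ # For reference, with the default "<s>"/"</s>" tokens, add_bos_token=True and add_eos_token=False,
+ # the template strings built above come out as:
+ #
+ # single == "<s>:0 $A:0"
+ # pair == "<s>:0 $A:0 <s>:1 $B:1"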
+
+ @property
+ def add_eos_token(self):
+ return self._add_eos_token
+
+ @property
+ def add_bos_token(self):
+ return self._add_bos_token
+
+ @add_eos_token.setter
+ def add_eos_token(self, value):
+ self._add_eos_token = value
+ self.update_post_processor()
+
+ @add_bos_token.setter
+ def add_bos_token(self, value):
+ self._add_bos_token = value
+ self.update_post_processor()
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
+
+ @property
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.default_chat_template
+ def default_chat_template(self):
+ """
+ LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
+ Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict
+ user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
+ rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
+ results in an unusual token ordering when it is present. This template should definitely be changed if you wish
+ to fine-tune a model with more flexible role ordering!
+
+ The output should look something like:
+
+ <s>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer </s><s>[INST] Prompt [/INST] Answer </s>
+ <s>[INST] Prompt [/INST]
+
+ The reference for this chat template is [this code
+ snippet](https://github.com/facebookresearch/llama/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/llama/generation.py#L320-L362)
+ in the original repository.
+ """
+ logger.warning_once(
+ "\nNo chat template is defined for this tokenizer - using the default template "
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+ )
+ template = (
+ "{% if messages[0]['role'] == 'system' %}"
+ "{% set loop_messages = messages[1:] %}" # Extract system message if it's present
+ "{% set system_message = messages[0]['content'] %}"
+ "{% elif USE_DEFAULT_PROMPT == true and not '<>' in messages[0]['content'] %}"
+ "{% set loop_messages = messages %}" # Or use the default system message if the flag is set
+ "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
+ "{% else %}"
+ "{% set loop_messages = messages %}"
+ "{% set system_message = false %}"
+ "{% endif %}"
+ "{% for message in loop_messages %}" # Loop over all non-system messages
+ "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
+ "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
+ "{% endif %}"
+ "{% if loop.index0 == 0 and system_message != false %}" # Embed system message in first message
+ "{% set content = '<>\\n' + system_message + '\\n<>\\n\\n' + message['content'] %}"
+ "{% else %}"
+ "{% set content = message['content'] %}"
+ "{% endif %}"
+ "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way
+ "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
+ "{% elif message['role'] == 'system' %}"
+ "{{ '<>\\n' + content.strip() + '\\n<>\\n\\n' }}"
+ "{% elif message['role'] == 'assistant' %}"
+ "{{ ' ' + content.strip() + ' ' + eos_token }}"
+ "{% endif %}"
+ "{% endfor %}"
+ )
+ template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
+ default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
+ template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
+
+ return template
+
+ # TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = bos_token_id + token_ids_0 + eos_token_id
+
+ if token_ids_1 is not None:
+ output = output + bos_token_id + token_ids_1 + eos_token_id
+
+ return output
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cab5af9af7c99775651e2f4a322265670676b8da
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__init__.py
@@ -0,0 +1,80 @@
+# coding=utf-8
+# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan,
+# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+ is_vision_available,
+)
+
+
+_import_structure = {
+ "configuration_pvt": ["PVT_PRETRAINED_CONFIG_ARCHIVE_MAP", "PvtConfig", "PvtOnnxConfig"],
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_pvt"] = ["PvtImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_pvt"] = [
+ "PVT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "PvtForImageClassification",
+ "PvtModel",
+ "PvtPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_pvt import PVT_PRETRAINED_CONFIG_ARCHIVE_MAP, PvtConfig, PvtOnnxConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_pvt import PvtImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_pvt import (
+ PVT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ PvtForImageClassification,
+ PvtModel,
+ PvtPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29ab09e102ee5b189eedadb4ae6033a5c5b32442
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/configuration_pvt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/configuration_pvt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe285f96d8dfb3b4d8b4b938784a136669b32ff7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/configuration_pvt.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/convert_pvt_to_pytorch.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/convert_pvt_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9157794f1dd71711228c1e93ad2b6db47ea2c72b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/convert_pvt_to_pytorch.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/image_processing_pvt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/image_processing_pvt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..946b87f293d8516106ae92b7aaa99d2a2fda36c9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/image_processing_pvt.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/modeling_pvt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/modeling_pvt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb250e3dab550d5d98093623ba992b7bfa0cbe92
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/modeling_pvt.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/configuration_pvt.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/configuration_pvt.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac7d5add7f5971942092159f018cb0dcd3d93c83
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/configuration_pvt.py
@@ -0,0 +1,164 @@
+# coding=utf-8
+# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan,
+# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Pvt model configuration"""
+
+from collections import OrderedDict
+from typing import Callable, List, Mapping
+
+from packaging import version
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+PVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "pvt-tiny-224": "https://huggingface.co/Zetatech/pvt-tiny-224",
+ # See all PVT models at https://huggingface.co/models?filter=pvt
+}
+
+
+class PvtConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`PvtModel`]. It is used to instantiate a Pvt
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the Pvt
+ [Xrenya/pvt-tiny-224](https://huggingface.co/Xrenya/pvt-tiny-224) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ The input image size
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ num_encoder_blocks (`int`, *optional*, defaults to 4):
+ The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
+ depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
+ The number of layers in each encoder block.
+ sequence_reduction_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
+ Sequence reduction ratios in each encoder block.
+ hidden_sizes (`List[int]`, *optional*, defaults to `[64, 128, 320, 512]`):
+ Dimension of each of the encoder blocks.
+ patch_sizes (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
+ Patch size before each encoder block.
+ strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
+ Stride before each encoder block.
+ num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
+ Number of attention heads for each attention layer in each block of the Transformer encoder.
+ mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`):
+ Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
+ encoder blocks.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
+ The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether or not a learnable bias should be added to the queries, keys and values.
+ num_labels (`int`, *optional*, defaults to 1000):
+ The number of classes.
+ Example:
+
+ ```python
+ >>> from transformers import PvtModel, PvtConfig
+
+ >>> # Initializing a PVT Xrenya/pvt-tiny-224 style configuration
+ >>> configuration = PvtConfig()
+
+ >>> # Initializing a model from the Xrenya/pvt-tiny-224 style configuration
+ >>> model = PvtModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "pvt"
+
+ def __init__(
+ self,
+ image_size: int = 224,
+ num_channels: int = 3,
+ num_encoder_blocks: int = 4,
+ depths: List[int] = [2, 2, 2, 2],
+ sequence_reduction_ratios: List[int] = [8, 4, 2, 1],
+ hidden_sizes: List[int] = [64, 128, 320, 512],
+ patch_sizes: List[int] = [4, 2, 2, 2],
+ strides: List[int] = [4, 2, 2, 2],
+ num_attention_heads: List[int] = [1, 2, 5, 8],
+ mlp_ratios: List[int] = [8, 8, 4, 4],
+ hidden_act: Mapping[str, Callable] = "gelu",
+ hidden_dropout_prob: float = 0.0,
+ attention_probs_dropout_prob: float = 0.0,
+ initializer_range: float = 0.02,
+ drop_path_rate: float = 0.0,
+ layer_norm_eps: float = 1e-6,
+ qkv_bias: bool = True,
+ num_labels: int = 1000,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.image_size = image_size
+ self.num_channels = num_channels
+ self.num_encoder_blocks = num_encoder_blocks
+ self.depths = depths
+ self.sequence_reduction_ratios = sequence_reduction_ratios
+ self.hidden_sizes = hidden_sizes
+ self.patch_sizes = patch_sizes
+ self.strides = strides
+ self.mlp_ratios = mlp_ratios
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.drop_path_rate = drop_path_rate
+ self.layer_norm_eps = layer_norm_eps
+ self.num_labels = num_labels
+ self.qkv_bias = qkv_bias
+
+
+class PvtOnnxConfig(OnnxConfig):
+ torch_onnx_minimum_version = version.parse("1.11")
+
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 12
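+
+
+# A minimal sketch of inspecting the ONNX export settings above (illustrative only):
+#
+# >>> onnx_config = PvtOnnxConfig(PvtConfig())
+# >>> dict(onnx_config.inputs)
+# {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
+# >>> onnx_config.default_onnx_opset
+# 12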
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/convert_pvt_to_pytorch.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/convert_pvt_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..187f3200d608a57a473b429c8dae81560863cd31
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/convert_pvt_to_pytorch.py
@@ -0,0 +1,227 @@
+# coding=utf-8
+# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan,
+# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Pvt checkpoints from the original library."""
+
+
+import argparse
+from pathlib import Path
+
+import requests
+import torch
+from PIL import Image
+
+from transformers import PvtConfig, PvtForImageClassification, PvtImageProcessor
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config):
+ rename_keys = []
+ for i in range(config.num_encoder_blocks):
+ # Rename embeddings' parameters
+ rename_keys.append((f"pos_embed{i + 1}", f"pvt.encoder.patch_embeddings.{i}.position_embeddings"))
+
+ rename_keys.append((f"patch_embed{i + 1}.proj.weight", f"pvt.encoder.patch_embeddings.{i}.projection.weight"))
+ rename_keys.append((f"patch_embed{i + 1}.proj.bias", f"pvt.encoder.patch_embeddings.{i}.projection.bias"))
+ rename_keys.append((f"patch_embed{i + 1}.norm.weight", f"pvt.encoder.patch_embeddings.{i}.layer_norm.weight"))
+ rename_keys.append((f"patch_embed{i + 1}.norm.bias", f"pvt.encoder.patch_embeddings.{i}.layer_norm.bias"))
+
+ for j in range(config.depths[i]):
+ # Rename blocks' parameters
+ rename_keys.append(
+ (f"block{i + 1}.{j}.attn.q.weight", f"pvt.encoder.block.{i}.{j}.attention.self.query.weight")
+ )
+ rename_keys.append(
+ (f"block{i + 1}.{j}.attn.q.bias", f"pvt.encoder.block.{i}.{j}.attention.self.query.bias")
+ )
+ rename_keys.append(
+ (f"block{i + 1}.{j}.attn.kv.weight", f"pvt.encoder.block.{i}.{j}.attention.self.kv.weight")
+ )
+ rename_keys.append((f"block{i + 1}.{j}.attn.kv.bias", f"pvt.encoder.block.{i}.{j}.attention.self.kv.bias"))
+
+ if config.sequence_reduction_ratios[i] > 1:
+ rename_keys.append(
+ (
+ f"block{i + 1}.{j}.attn.norm.weight",
+ f"pvt.encoder.block.{i}.{j}.attention.self.layer_norm.weight",
+ )
+ )
+ rename_keys.append(
+ (f"block{i + 1}.{j}.attn.norm.bias", f"pvt.encoder.block.{i}.{j}.attention.self.layer_norm.bias")
+ )
+ rename_keys.append(
+ (
+ f"block{i + 1}.{j}.attn.sr.weight",
+ f"pvt.encoder.block.{i}.{j}.attention.self.sequence_reduction.weight",
+ )
+ )
+ rename_keys.append(
+ (
+ f"block{i + 1}.{j}.attn.sr.bias",
+ f"pvt.encoder.block.{i}.{j}.attention.self.sequence_reduction.bias",
+ )
+ )
+
+ rename_keys.append(
+ (f"block{i + 1}.{j}.attn.proj.weight", f"pvt.encoder.block.{i}.{j}.attention.output.dense.weight")
+ )
+ rename_keys.append(
+ (f"block{i + 1}.{j}.attn.proj.bias", f"pvt.encoder.block.{i}.{j}.attention.output.dense.bias")
+ )
+
+ rename_keys.append((f"block{i + 1}.{j}.norm1.weight", f"pvt.encoder.block.{i}.{j}.layer_norm_1.weight"))
+ rename_keys.append((f"block{i + 1}.{j}.norm1.bias", f"pvt.encoder.block.{i}.{j}.layer_norm_1.bias"))
+
+ rename_keys.append((f"block{i + 1}.{j}.norm2.weight", f"pvt.encoder.block.{i}.{j}.layer_norm_2.weight"))
+ rename_keys.append((f"block{i + 1}.{j}.norm2.bias", f"pvt.encoder.block.{i}.{j}.layer_norm_2.bias"))
+
+ rename_keys.append((f"block{i + 1}.{j}.mlp.fc1.weight", f"pvt.encoder.block.{i}.{j}.mlp.dense1.weight"))
+ rename_keys.append((f"block{i + 1}.{j}.mlp.fc1.bias", f"pvt.encoder.block.{i}.{j}.mlp.dense1.bias"))
+ rename_keys.append((f"block{i + 1}.{j}.mlp.fc2.weight", f"pvt.encoder.block.{i}.{j}.mlp.dense2.weight"))
+ rename_keys.append((f"block{i + 1}.{j}.mlp.fc2.bias", f"pvt.encoder.block.{i}.{j}.mlp.dense2.bias"))
+
+ # Rename cls token
+ rename_keys.extend(
+ [
+ ("cls_token", "pvt.encoder.patch_embeddings.3.cls_token"),
+ ]
+ )
+ # Rename norm layer and classifier layer
+ rename_keys.extend(
+ [
+ ("norm.weight", "pvt.encoder.layer_norm.weight"),
+ ("norm.bias", "pvt.encoder.layer_norm.bias"),
+ ("head.weight", "classifier.weight"),
+ ("head.bias", "classifier.bias"),
+ ]
+ )
+
+ return rename_keys
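+
+
+# For orientation, the first few pairs produced above for stage 0 look like:
+# ("pos_embed1", "pvt.encoder.patch_embeddings.0.position_embeddings")
+# ("patch_embed1.proj.weight", "pvt.encoder.patch_embeddings.0.projection.weight")
+# ("block1.0.attn.q.weight", "pvt.encoder.block.0.0.attention.self.query.weight")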
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_k_v(state_dict, config):
+ # for each of the encoder blocks:
+ for i in range(config.num_encoder_blocks):
+ for j in range(config.depths[i]):
+ # read in weights + bias of keys and values (which is a single matrix in the original implementation)
+ kv_weight = state_dict.pop(f"pvt.encoder.block.{i}.{j}.attention.self.kv.weight")
+ kv_bias = state_dict.pop(f"pvt.encoder.block.{i}.{j}.attention.self.kv.bias")
+ # next, add keys and values (in that order) to the state dict
+ state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
+ state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
+
+ state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
+ config.hidden_sizes[i] :, :
+ ]
+ state_dict[f"pvt.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+# We will verify our results on an image of cute cats
+def prepare_img():
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ im = Image.open(requests.get(url, stream=True).raw)
+ return im
+
+
+@torch.no_grad()
+def convert_pvt_checkpoint(pvt_size, pvt_checkpoint, pytorch_dump_folder_path):
+ """
+ Copy/paste/tweak model's weights to our PVT structure.
+ """
+
+ # define default Pvt configuration
+ if pvt_size == "tiny":
+ config_path = "Zetatech/pvt-tiny-224"
+ elif pvt_size == "small":
+ config_path = "Zetatech/pvt-small-224"
+ elif pvt_size == "medium":
+ config_path = "Zetatech/pvt-medium-224"
+ elif pvt_size == "large":
+ config_path = "Zetatech/pvt-large-224"
+ else:
+ raise ValueError(f"Available model's size: 'tiny', 'small', 'medium', 'large', but " f"'{pvt_size}' was given")
+ config = PvtConfig(name_or_path=config_path)
+ # load original model from https://github.com/whai362/PVT
+ state_dict = torch.load(pvt_checkpoint, map_location="cpu")
+
+ rename_keys = create_rename_keys(config)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_k_v(state_dict, config)
+
+ # load HuggingFace model
+ model = PvtForImageClassification(config).eval()
+ model.load_state_dict(state_dict)
+
+ # Check outputs on an image, prepared by PVTFeatureExtractor
+ image_processor = PvtImageProcessor(size=config.image_size)
+ encoding = image_processor(images=prepare_img(), return_tensors="pt")
+ pixel_values = encoding["pixel_values"]
+ outputs = model(pixel_values)
+ logits = outputs.logits.detach().cpu()
+
+ if pvt_size == "tiny":
+ expected_slice_logits = torch.tensor([-1.4192, -1.9158, -0.9702])
+ elif pvt_size == "small":
+ expected_slice_logits = torch.tensor([0.4353, -0.1960, -0.2373])
+ elif pvt_size == "medium":
+ expected_slice_logits = torch.tensor([-0.2914, -0.2231, 0.0321])
+ elif pvt_size == "large":
+ expected_slice_logits = torch.tensor([0.3740, -0.7739, -0.4214])
+ else:
+ raise ValueError(f"Available model's size: 'tiny', 'small', 'medium', 'large', but " f"'{pvt_size}' was given")
+
+ assert torch.allclose(logits[0, :3], expected_slice_logits, atol=1e-4)
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model pytorch_model.bin to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Saving image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--pvt_size",
+ default="tiny",
+ type=str,
+ help="Size of the PVT pretrained model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pvt_checkpoint",
+ default="pvt_tiny.pth",
+ type=str,
+ help="Checkpoint of the PVT pretrained model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+ )
+
+ args = parser.parse_args()
+ convert_pvt_checkpoint(args.pvt_size, args.pvt_checkpoint, args.pytorch_dump_folder_path)
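+
+ # Example invocation (paths and checkpoint file are placeholders for a locally downloaded
+ # PVT checkpoint from https://github.com/whai362/PVT):
+ #
+ # python convert_pvt_to_pytorch.py --pvt_size tiny --pvt_checkpoint pvt_tiny.pth --pytorch_dump_folder_path ./pvt-tiny-224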
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/image_processing_pvt.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/image_processing_pvt.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3907edf3af09394acbacb2db992c7a3a71ef091
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/image_processing_pvt.py
@@ -0,0 +1,290 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Pvt."""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import resize, to_channel_dimension_format
+from ...image_utils import (
+ IMAGENET_DEFAULT_MEAN,
+ IMAGENET_DEFAULT_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ make_list_of_images,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class PvtImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a PVT image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
+ size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
+ size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
+ `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 224, "width": 224}
+ size = get_size_dict(size)
+ self.do_resize = do_resize
+ self.do_rescale = do_rescale
+ self.do_normalize = do_normalize
+ self.size = size
+ self.resample = resample
+ self.rescale_factor = rescale_factor
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
+ self._valid_processor_keys = [
+ "images",
+ "do_resize",
+ "size",
+ "resample",
+ "do_rescale",
+ "rescale_factor",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ size = get_size_dict(size)
+ if "height" not in size or "width" not in size:
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
+ output_size = (size["height"], size["width"])
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def preprocess(
+ self,
+ images: ImageInput,
+ do_resize: Optional[bool] = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ):
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ images (`ImageInput`):
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
+ resizing.
+ resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
+ `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
+ an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values to the [0, 1] range.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to use if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to use if `do_normalize` is set to `True`.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ resample = resample if resample is not None else self.resample
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ size = size if size is not None else self.size
+ size_dict = get_size_dict(size)
+
+ images = make_list_of_images(images)
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(images):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ images = [to_numpy_array(image) for image in images]
+
+ if is_scaled_image(images[0]) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(images[0])
+
+ if do_resize:
+ images = [
+ self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_rescale:
+ images = [
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ if do_normalize:
+ images = [
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+ for image in images
+ ]
+
+ images = [
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
+ ]
+
+ data = {"pixel_values": images}
+ return BatchFeature(data=data, tensor_type=return_tensors)
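+
+
+# Illustrative usage sketch for `preprocess` above (not part of the library source). The checkpoint id is the one
+# used as `_CHECKPOINT_FOR_DOC` elsewhere in this diff; loading it through `AutoImageProcessor` and the default
+# 224x224 output size are assumptions.
+#
+# >>> import numpy as np
+# >>> from transformers import AutoImageProcessor
+# >>> image_processor = AutoImageProcessor.from_pretrained("Zetatech/pvt-tiny-224")
+# >>> image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
+# >>> inputs = image_processor(images=image, return_tensors="pt")
+# >>> inputs["pixel_values"].shape  # e.g. torch.Size([1, 3, 224, 224]): (batch_size, num_channels, height, width)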
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/modeling_pvt.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/modeling_pvt.py
new file mode 100644
index 0000000000000000000000000000000000000000..58ed0ae68fedd6ce2613640e10f8e9bfa3934679
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/pvt/modeling_pvt.py
@@ -0,0 +1,670 @@
+# coding=utf-8
+# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan,
+# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch PVT model."""
+
+import collections
+import math
+from typing import Iterable, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_pvt import PvtConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "PvtConfig"
+
+_CHECKPOINT_FOR_DOC = "Zetatech/pvt-tiny-224"
+_EXPECTED_OUTPUT_SHAPE = [1, 50, 512]
+
+_IMAGE_CLASS_CHECKPOINT = "Zetatech/pvt-tiny-224"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+PVT_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "Zetatech/pvt-tiny-224"
+ # See all PVT models at https://huggingface.co/models?filter=pvt
+]
+
+
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
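+
+# Illustrative note (a sketch, not part of the original code): dividing the surviving samples by `keep_prob`
+# keeps the expected value of the output equal to the input, e.g.
+# >>> x = torch.ones(4, 3)
+# >>> drop_path(x, drop_prob=0.5, training=True)  # roughly half the rows zeroed, the rest scaled by 1 / 0.5 = 2.0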
+
+
+# Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Pvt
+class PvtDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class PvtPatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(
+ self,
+ config: PvtConfig,
+ image_size: Union[int, Iterable[int]],
+ patch_size: Union[int, Iterable[int]],
+ stride: int,
+ num_channels: int,
+ hidden_size: int,
+ cls_token: bool = False,
+ ):
+ super().__init__()
+ self.config = config
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+
+ self.position_embeddings = nn.Parameter(
+ torch.randn(1, num_patches + 1 if cls_token else num_patches, hidden_size)
+ )
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, hidden_size)) if cls_token else None
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=stride, stride=patch_size)
+ self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ num_patches = height * width
+ if num_patches == self.config.image_size * self.config.image_size:
+ return self.position_embeddings
+ embeddings = embeddings.reshape(1, height, width, -1).permute(0, 3, 1, 2)
+ interpolated_embeddings = F.interpolate(embeddings, size=(height, width), mode="bilinear")
+ interpolated_embeddings = interpolated_embeddings.reshape(1, -1, height * width).permute(0, 2, 1)
+ return interpolated_embeddings
+
+ def forward(self, pixel_values: torch.Tensor) -> Tuple[torch.Tensor, int, int]:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ patch_embed = self.projection(pixel_values)
+ *_, height, width = patch_embed.shape
+ patch_embed = patch_embed.flatten(2).transpose(1, 2)
+ embeddings = self.layer_norm(patch_embed)
+ if self.cls_token is not None:
+ cls_token = self.cls_token.expand(batch_size, -1, -1)
+ embeddings = torch.cat((cls_token, embeddings), dim=1)
+ position_embeddings = self.interpolate_pos_encoding(self.position_embeddings[:, 1:], height, width)
+ position_embeddings = torch.cat((self.position_embeddings[:, :1], position_embeddings), dim=1)
+ else:
+ position_embeddings = self.interpolate_pos_encoding(self.position_embeddings, height, width)
+ embeddings = self.dropout(embeddings + position_embeddings)
+
+ return embeddings, height, width
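+
+# Shape sketch for the embedding above (illustrative; the concrete numbers assume the first stage of a
+# pvt-tiny style setup with patch_size=4, which is an assumption rather than something read from this file):
+# a (1, 3, 224, 224) `pixel_values` tensor is projected onto a 56x56 patch grid, so the returned `embeddings`
+# have shape (1, 56 * 56, hidden_size) and the method also returns height=width=56.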
+
+
+class PvtSelfOutput(nn.Module):
+ def __init__(self, config: PvtConfig, hidden_size: int):
+ super().__init__()
+ self.dense = nn.Linear(hidden_size, hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class PvtEfficientSelfAttention(nn.Module):
+ """Efficient self-attention mechanism with reduction of the sequence [PvT paper](https://arxiv.org/abs/2102.12122)."""
+
+ def __init__(
+ self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float
+ ):
+ super().__init__()
+ self.hidden_size = hidden_size
+ self.num_attention_heads = num_attention_heads
+
+ if self.hidden_size % self.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({self.num_attention_heads})"
+ )
+
+ self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ self.sequences_reduction_ratio = sequences_reduction_ratio
+ if sequences_reduction_ratio > 1:
+ self.sequence_reduction = nn.Conv2d(
+ hidden_size, hidden_size, kernel_size=sequences_reduction_ratio, stride=sequences_reduction_ratio
+ )
+ self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
+
+ def transpose_for_scores(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ hidden_states = hidden_states.view(new_shape)
+ return hidden_states.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ height: int,
+ width: int,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor]:
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
+
+ if self.sequences_reduction_ratio > 1:
+ batch_size, seq_len, num_channels = hidden_states.shape
+ # Reshape to (batch_size, num_channels, height, width)
+ hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
+ # Apply sequence reduction
+ hidden_states = self.sequence_reduction(hidden_states)
+ # Reshape back to (batch_size, seq_len, num_channels)
+ hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
+ hidden_states = self.layer_norm(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
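+
+# Illustrative note on the sequence reduction above (a sketch; the numbers assume a first-stage setting of
+# sequences_reduction_ratio=8 on a 56x56 feature map, which is an assumption, not read from this file):
+# the query keeps all 56 * 56 = 3136 tokens while keys/values come from the strided Conv2d output of
+# 7 * 7 = 49 tokens, so the attention matrix is 3136x49 instead of 3136x3136.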
+
+
+class PvtAttention(nn.Module):
+ def __init__(
+ self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float
+ ):
+ super().__init__()
+ self.self = PvtEfficientSelfAttention(
+ config,
+ hidden_size=hidden_size,
+ num_attention_heads=num_attention_heads,
+ sequences_reduction_ratio=sequences_reduction_ratio,
+ )
+ self.output = PvtSelfOutput(config, hidden_size=hidden_size)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(hidden_states, height, width, output_attentions)
+
+ attention_output = self.output(self_outputs[0])
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class PvtFFN(nn.Module):
+ def __init__(
+ self,
+ config: PvtConfig,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ ):
+ super().__init__()
+ out_features = out_features if out_features is not None else in_features
+ hidden_features = hidden_features if hidden_features is not None else in_features
+ self.dense1 = nn.Linear(in_features, hidden_features)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.dense2 = nn.Linear(hidden_features, out_features)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense1(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense2(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class PvtLayer(nn.Module):
+ def __init__(
+ self,
+ config: PvtConfig,
+ hidden_size: int,
+ num_attention_heads: int,
+ drop_path: float,
+ sequences_reduction_ratio: float,
+ mlp_ratio: float,
+ ):
+ super().__init__()
+ self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
+ self.attention = PvtAttention(
+ config=config,
+ hidden_size=hidden_size,
+ num_attention_heads=num_attention_heads,
+ sequences_reduction_ratio=sequences_reduction_ratio,
+ )
+ self.drop_path = PvtDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+ self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
+ mlp_hidden_size = int(hidden_size * mlp_ratio)
+ self.mlp = PvtFFN(config=config, in_features=hidden_size, hidden_features=mlp_hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False):
+ self_attention_outputs = self.attention(
+ hidden_states=self.layer_norm_1(hidden_states),
+ height=height,
+ width=width,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:]
+
+ attention_output = self.drop_path(attention_output)
+ hidden_states = attention_output + hidden_states
+
+ mlp_output = self.mlp(self.layer_norm_2(hidden_states))
+
+ mlp_output = self.drop_path(mlp_output)
+ layer_output = hidden_states + mlp_output
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+class PvtEncoder(nn.Module):
+ def __init__(self, config: PvtConfig):
+ super().__init__()
+ self.config = config
+
+ # stochastic depth decay rule
+ drop_path_decays = torch.linspace(0, config.drop_path_rate, sum(config.depths)).tolist()
+
+ # patch embeddings
+ embeddings = []
+
+ for i in range(config.num_encoder_blocks):
+ embeddings.append(
+ PvtPatchEmbeddings(
+ config=config,
+ image_size=config.image_size if i == 0 else self.config.image_size // (2 ** (i + 1)),
+ patch_size=config.patch_sizes[i],
+ stride=config.strides[i],
+ num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
+ hidden_size=config.hidden_sizes[i],
+ cls_token=i == config.num_encoder_blocks - 1,
+ )
+ )
+ self.patch_embeddings = nn.ModuleList(embeddings)
+
+ # Transformer blocks
+ blocks = []
+ cur = 0
+ for i in range(config.num_encoder_blocks):
+ # each block consists of layers
+ layers = []
+ if i != 0:
+ cur += config.depths[i - 1]
+ for j in range(config.depths[i]):
+ layers.append(
+ PvtLayer(
+ config=config,
+ hidden_size=config.hidden_sizes[i],
+ num_attention_heads=config.num_attention_heads[i],
+ drop_path=drop_path_decays[cur + j],
+ sequences_reduction_ratio=config.sequence_reduction_ratios[i],
+ mlp_ratio=config.mlp_ratios[i],
+ )
+ )
+ blocks.append(nn.ModuleList(layers))
+
+ self.block = nn.ModuleList(blocks)
+
+ # Layer norms
+ self.layer_norm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ batch_size = pixel_values.shape[0]
+ num_blocks = len(self.block)
+ hidden_states = pixel_values
+ for idx, (embedding_layer, block_layer) in enumerate(zip(self.patch_embeddings, self.block)):
+ # first, obtain patch embeddings
+ hidden_states, height, width = embedding_layer(hidden_states)
+ # second, send embeddings through blocks
+ for block in block_layer:
+ layer_outputs = block(hidden_states, height, width, output_attentions)
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+ if idx != num_blocks - 1:
+ hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class PvtPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = PvtConfig
+ base_model_prefix = "pvt"
+ main_input_name = "pixel_values"
+
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
+ # `trunc_normal_cpu` not implemented in `half` issues
+ module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, PvtPatchEmbeddings):
+ module.position_embeddings.data = nn.init.trunc_normal_(
+ module.position_embeddings.data,
+ mean=0.0,
+ std=self.config.initializer_range,
+ )
+ if module.cls_token is not None:
+ module.cls_token.data = nn.init.trunc_normal_(
+ module.cls_token.data,
+ mean=0.0,
+ std=self.config.initializer_range,
+ )
+
+
+PVT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`~PvtConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+PVT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PvtImageProcessor.__call__`]
+ for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Pvt encoder outputting raw hidden-states without any specific head on top.",
+ PVT_START_DOCSTRING,
+)
+class PvtModel(PvtPreTrainedModel):
+ def __init__(self, config: PvtConfig):
+ super().__init__(config)
+ self.config = config
+
+ # hierarchical Transformer encoder
+ self.encoder = PvtEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. `heads_to_prune` is a dict of {layer_num: list of heads to prune in this layer}.
+ See the base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(PVT_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_outputs = self.encoder(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Pvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
+ the [CLS] token) e.g. for ImageNet.
+ """,
+ PVT_START_DOCSTRING,
+)
+class PvtForImageClassification(PvtPreTrainedModel):
+ def __init__(self, config: PvtConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.pvt = PvtModel(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(PVT_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)"))
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor],
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.pvt(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.classifier(sequence_output[:, 0, :])
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
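+
+
+# Illustrative usage sketch (not part of the library source; checkpoint and expected label are taken from
+# `_IMAGE_CLASS_CHECKPOINT` / `_IMAGE_CLASS_EXPECTED_OUTPUT` above, the rest mirrors the code sample
+# generated by the decorators):
+#
+# >>> from transformers import AutoImageProcessor, PvtForImageClassification
+# >>> model = PvtForImageClassification.from_pretrained("Zetatech/pvt-tiny-224")
+# >>> image_processor = AutoImageProcessor.from_pretrained("Zetatech/pvt-tiny-224")
+# >>> inputs = image_processor(images=image, return_tensors="pt")  # `image` is any PIL image
+# >>> predicted_class_idx = model(**inputs).logits.argmax(-1).item()
+# >>> model.config.id2label[predicted_class_idx]  # e.g. "tabby, tabby cat" for a cat image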
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__init__.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebc4caef2da10a13c3b135463d51e115b542ace6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_seamless_m4t_v2": ["SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "SeamlessM4Tv2Config"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_seamless_m4t_v2"] = [
+ "SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "SeamlessM4Tv2ForTextToSpeech",
+ "SeamlessM4Tv2ForSpeechToSpeech",
+ "SeamlessM4Tv2ForTextToText",
+ "SeamlessM4Tv2ForSpeechToText",
+ "SeamlessM4Tv2Model",
+ "SeamlessM4Tv2PreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_seamless_m4t_v2 import SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4Tv2Config
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_seamless_m4t_v2 import (
+ SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
+ SeamlessM4Tv2ForSpeechToSpeech,
+ SeamlessM4Tv2ForSpeechToText,
+ SeamlessM4Tv2ForTextToSpeech,
+ SeamlessM4Tv2ForTextToText,
+ SeamlessM4Tv2Model,
+ SeamlessM4Tv2PreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
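+
+# Illustrative note (a sketch, not part of the original file): with the `_LazyModule` pattern above,
+# `from transformers.models.seamless_m4t_v2 import SeamlessM4Tv2Config` only imports
+# `configuration_seamless_m4t_v2` on first attribute access, and the modeling classes registered in
+# `_import_structure` are exposed only when `is_torch_available()` is True.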
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bdf8f0830c4724dcef008f4756c6a209badb1de
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/configuration_seamless_m4t_v2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/configuration_seamless_m4t_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..386b2b12f428c0efd9c43cf2dbe5a4782c6afa0b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/configuration_seamless_m4t_v2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..28c521f6a589b8695fdcd36324a1682ddcaafa36
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py
@@ -0,0 +1,426 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" SeamlessM4Tv2 model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "": "https://huggingface.co//resolve/main/config.json",
+}
+
+
+class SeamlessM4Tv2Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`~SeamlessM4Tv2Model`]. It is used to instantiate
+ a SeamlessM4Tv2 model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the SeamlessM4Tv2
+ [""](https://huggingface.co/"") architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 256102):
+ Vocabulary size of the text modality of the SeamlessM4Tv2 model. Defines the number of different tokens
+ that can be represented by the `inputs_ids` passed when calling [`~SeamlessM4Tv2Model`],
+ [`~SeamlessM4Tv2ForTextToSpeech`] or [`~SeamlessM4Tv2ForTextToText`].
+ t2u_vocab_size (`int`, *optional*, defaults to 10082):
+ Unit vocabulary size of the SeamlessM4Tv2 model. Defines the number of different "unit tokens" that can be
+ represented by the `inputs_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4Tv2Model`],
+ [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
+ char_vocab_size (`int`, *optional*, defaults to 10943):
+ Character vocabulary size of the SeamlessM4Tv2 model. Defines the number of different character tokens that
+ can be represented by the `char_inputs_ids` passed when calling the Text-To-Units sub-model of
+ [`~SeamlessM4Tv2Model`], [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
+
+ > Parameters shared across sub-models
+
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the "intermediate" layers in the architecture.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
+ The maximum sequence length that this model's text encoder and decoder might ever be used with. Typically set
+ this to something large just in case (e.g., 512 or 1024 or 2048).
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether the model is used as an encoder/decoder or not.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.05):
+ The LayerDrop probability for the encoders. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.05):
+ The LayerDrop probability for the decoders. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the decoder and feed-forward layers. If string,
+ `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all attention layers.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all activation layers in the model.
+ scale_embedding (`bool`, *optional*, defaults to `True`):
+ Scale embeddings by dividing by sqrt(d_model).
+
+ > Text encoder and text decoder specific parameters
+
+ encoder_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer text encoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 8192):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder.
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer text encoder.
+ decoder_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer text decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 8192):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer text decoder.
+ decoder_start_token_id (`int`, *optional*, defaults to 3):
+ If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only
+ applied in the text decoder.
+ max_new_tokens (`int`, *optional*, defaults to 256):
+ The maximum number of text tokens to generate, ignoring the number of tokens in the prompt.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ The id of the _padding_ text token. Only applied to the text-decoder model.
+ bos_token_id (`int`, *optional*, defaults to 2):
+ The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model.
+ eos_token_id (`int`, *optional*, defaults to 3):
+ The id of the _end-of-stream_ text token. Only applied to the text-decoder model.
+
+ > Speech encoder specific parameters
+
+ speech_encoder_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer speech encoder.
+ speech_encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer speech encoder.
+ speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder.
+ speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
+ The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`,
+ `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported.
+ speech_encoder_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all layers in the speech encoder.
+ add_adapter (`bool`, *optional*, defaults to `True`):
+ Add an adapter layer on top of the speech encoder.
+ speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The LayerDrop probability for the speech encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ feature_projection_input_dim (`int`, *optional*, defaults to 160):
+ Input dimension of the input feature projection of the speech encoder, i.e. the dimension after processing
+ input audios with [`SeamlessM4TFeatureExtractor`].
+ adaptor_kernel_size (`int`, *optional*, defaults to 8):
+ Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
+ adaptor_stride (`int`, *optional*, defaults to 8):
+ Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
+ adaptor_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all layers in the speech adapter.
+ num_adapter_layers (`int`, *optional*, defaults to 1):
+ Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
+ True`.
+ position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`):
+ Can be specified to `relative_key`. If left to `None`, no relative position embedding is applied. Only
+ applied to the speech encoder. For more information on `"relative_key"`, please refer to [Self-Attention
+ with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
+ Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder.
+ left_max_position_embeddings (`int`, *optional*, defaults to 64):
+ The left clipping value for relative positions.
+ right_max_position_embeddings (`int`, *optional*, defaults to 8):
+ The right clipping value for relative positions.
+ speech_encoder_chunk_size (`int`, *optional*, defaults to 20000):
+ The size of each attention chunk.
+ speech_encoder_left_chunk_num (`int`, *optional*, defaults to 128):
+ Number of chunks on the left up to which lookahead is allowed.
+
+ > Text-To-Unit (t2u) model specific parameters
+
+ t2u_bos_token_id (`int`, *optional*, defaults to 0):
+ The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
+ t2u_pad_token_id (`int`, *optional*, defaults to 1):
+ The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model.
+ t2u_eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model.
+ t2u_encoder_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer text-to-unit encoder.
+ t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder.
+ t2u_encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer text-to-unit encoder.
+ t2u_decoder_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer text-to-unit decoder.
+ t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder.
+ t2u_decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer text-to-unit decoder.
+ t2u_max_position_embeddings (`int`, *optional*, defaults to 4096):
+ The maximum sequence length that this model's text-to-unit component might ever be used with. Typically set
+ this to something large just in case (e.g., 512 or 1024 or 2048).
+ t2u_variance_predictor_embed_dim (`int`, *optional*, defaults to 1024):
+ The projection dimension of the text-to-unit's duration predictor.
+ t2u_variance_predictor_hidden_dim (`int`, *optional*, defaults to 256):
+ Internal dimension of the text-to-unit's duration predictor.
+ t2u_variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
+ Kernel size of the convolutional layers of the text-to-unit's duration predictor.
+ t2u_variance_pred_dropout (`float`, *optional*, defaults to 0.5):
+ The dropout probability of the text-to-unit's duration predictor.
+
+ > Hifi-Gan Vocoder specific parameters
+
+ sampling_rate (`int`, *optional*, defaults to 16000):
+ The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
+ upsample_initial_channel (`int`, *optional*, defaults to 512):
+ The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only.
+ upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`):
+ A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network.
+ The length of *upsample_rates* defines the number of convolutional layers and has to match the length of
+ *upsample_kernel_sizes*. Applies to the vocoder only.
+ upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling
+ network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match
+ the length of *upsample_rates*. Applies to the vocoder only.
+ resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`):
+ A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive
+ field fusion (MRF) module. Applies to the vocoder only.
+ resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
+ A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in
+ the multi-receptive field fusion (MRF) module. Applies to the vocoder only.
+ leaky_relu_slope (`float`, *optional*, defaults to 0.1):
+ The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder
+ only.
+ unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000):
+ Vocabulary size of the SeamlessM4Tv2 vocoder. Defines the number of different unit tokens that can be
+ represented by the `inputs_ids` passed when calling the vocoder of [`~SeamlessM4Tv2Model`],
+ [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`].
+ unit_embed_dim (`int`, *optional*, defaults to 1280):
+ The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only.
+ lang_embed_dim (`int`, *optional*, defaults to 256):
+ The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only.
+ spkr_embed_dim (`int`, *optional*, defaults to 256):
+ The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only.
+ vocoder_num_langs (`int`, *optional*, defaults to 36):
+ Number of languages supported by the vocoder. Might be different from `t2u_num_langs`.
+ vocoder_num_spkrs (`int`, *optional*, defaults to 200):
+ Number of speakers supported by the vocoder.
+ variance_predictor_kernel_size (`int`, *optional*, defaults to 3):
+ Kernel size of the duration predictor. Applies to the vocoder only.
+ var_pred_dropout (`float`, *optional*, defaults to 0.5):
+ The dropout probability of the duration predictor. Applies to the vocoder only.
+ vocoder_offset (`int`, *optional*, defaults to 4):
+ Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only.
+
+ ```python
+ >>> from transformers import SeamlessM4Tv2Model, SeamlessM4Tv2Config
+
+ >>> # Initializing a SeamlessM4Tv2 "" style configuration
+ >>> configuration = SeamlessM4Tv2Config()
+
+ >>> # Initializing a model from the "" style configuration
+ >>> model = SeamlessM4Tv2Model(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "seamless_m4t_v2"
+
+ def __init__(
+ self,
+ vocab_size=256102,
+ t2u_vocab_size=10082,
+ char_vocab_size=10943,
+ # shared config
+ hidden_size=1024,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ use_cache=True,
+ max_position_embeddings=4096,
+ is_encoder_decoder=True,
+ encoder_layerdrop=0.05,
+ decoder_layerdrop=0.05,
+ activation_function="relu",
+ dropout=0.1,
+ attention_dropout=0.1,
+ activation_dropout=0.0,
+ scale_embedding=True,
+ # text encoder|decoder
+ encoder_layers=24,
+ encoder_ffn_dim=8192,
+ encoder_attention_heads=16,
+ decoder_layers=24,
+ decoder_ffn_dim=8192,
+ decoder_attention_heads=16,
+ decoder_start_token_id=3,
+ max_new_tokens=256,
+ pad_token_id=0,
+ bos_token_id=2,
+ eos_token_id=3,
+ # speech_encoder
+ speech_encoder_layers=24,
+ speech_encoder_attention_heads=16,
+ speech_encoder_intermediate_size=4096,
+ speech_encoder_hidden_act="swish",
+ speech_encoder_dropout=0.0,
+ add_adapter=True,
+ speech_encoder_layerdrop=0.1,
+ feature_projection_input_dim=160,
+ adaptor_kernel_size=8,
+ adaptor_stride=8,
+ adaptor_dropout=0.1,
+ num_adapter_layers=1,
+ position_embeddings_type="relative_key",
+ conv_depthwise_kernel_size=31,
+ left_max_position_embeddings=64,
+ right_max_position_embeddings=8,
+ speech_encoder_chunk_size=20000,
+ speech_encoder_left_chunk_num=128,
+ # t2u config
+ t2u_bos_token_id=0,
+ t2u_pad_token_id=1,
+ t2u_eos_token_id=2,
+ t2u_encoder_layers=6,
+ t2u_encoder_ffn_dim=8192,
+ t2u_encoder_attention_heads=16,
+ t2u_decoder_layers=6,
+ t2u_decoder_ffn_dim=8192,
+ t2u_decoder_attention_heads=16,
+ t2u_max_position_embeddings=4096,
+ t2u_variance_predictor_embed_dim=1024,
+ t2u_variance_predictor_hidden_dim=256,
+ t2u_variance_predictor_kernel_size=3,
+ t2u_variance_pred_dropout=0.5,
+ # hifi-gan vocoder config
+ sampling_rate=16000,
+ upsample_initial_channel=512,
+ upsample_rates=[5, 4, 4, 2, 2],
+ upsample_kernel_sizes=[11, 8, 8, 4, 4],
+ resblock_kernel_sizes=[3, 7, 11],
+ resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ leaky_relu_slope=0.1,
+ # specific to Code Hifi-Gan
+ unit_hifi_gan_vocab_size=10000,
+ unit_embed_dim=1280,
+ lang_embed_dim=256,
+ spkr_embed_dim=256,
+ vocoder_num_langs=36,
+ vocoder_num_spkrs=200,
+ variance_predictor_kernel_size=3,
+ var_pred_dropout=0.5,
+ vocoder_offset=4,
+ **kwargs,
+ ):
+ # overall_config
+ self.vocab_size = vocab_size
+ self.t2u_vocab_size = t2u_vocab_size
+ self.char_vocab_size = char_vocab_size
+ self.hidden_size = hidden_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.max_position_embeddings = max_position_embeddings
+ self.use_cache = use_cache
+ self.max_new_tokens = max_new_tokens
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.activation_function = activation_function
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.scale_embedding = scale_embedding
+ # for proper config init
+ self.num_attention_heads = decoder_attention_heads
+ self.num_hidden_layers = decoder_layers
+
+ # text|unit encoder|decoder
+ self.encoder_layers = encoder_layers
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_layers = decoder_layers
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_attention_heads = decoder_attention_heads
+
+ # speech_encoder
+ self.speech_encoder_layers = speech_encoder_layers
+ self.speech_encoder_hidden_act = speech_encoder_hidden_act
+ self.speech_encoder_dropout = speech_encoder_dropout
+ self.speech_encoder_attention_heads = speech_encoder_attention_heads
+ self.speech_encoder_layerdrop = speech_encoder_layerdrop
+ self.speech_encoder_intermediate_size = speech_encoder_intermediate_size
+ self.feature_projection_input_dim = feature_projection_input_dim
+ self.adaptor_kernel_size = adaptor_kernel_size
+ self.adaptor_stride = adaptor_stride
+ self.adaptor_dropout = adaptor_dropout
+ self.num_adapter_layers = num_adapter_layers
+ self.position_embeddings_type = position_embeddings_type
+ self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
+ self.add_adapter = add_adapter
+ self.left_max_position_embeddings = left_max_position_embeddings
+ self.right_max_position_embeddings = right_max_position_embeddings
+ self.speech_encoder_chunk_size = speech_encoder_chunk_size
+ self.speech_encoder_left_chunk_num = speech_encoder_left_chunk_num
+
+ # t2u config
+ self.t2u_bos_token_id = t2u_bos_token_id
+ self.t2u_pad_token_id = t2u_pad_token_id
+ self.t2u_eos_token_id = t2u_eos_token_id
+ self.t2u_encoder_layers = t2u_encoder_layers
+ self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim
+ self.t2u_encoder_attention_heads = t2u_encoder_attention_heads
+ self.t2u_decoder_layers = t2u_decoder_layers
+ self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim
+ self.t2u_decoder_attention_heads = t2u_decoder_attention_heads
+ self.t2u_max_position_embeddings = t2u_max_position_embeddings
+ self.t2u_variance_predictor_embed_dim = t2u_variance_predictor_embed_dim
+ self.t2u_variance_predictor_hidden_dim = t2u_variance_predictor_hidden_dim
+ self.t2u_variance_predictor_kernel_size = t2u_variance_predictor_kernel_size
+ self.t2u_variance_pred_dropout = t2u_variance_pred_dropout
+
+ # hifi-gan vocoder config
+ # original parameters specific to Hifi-Gan
+ self.sampling_rate = sampling_rate
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_rates = upsample_rates
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.leaky_relu_slope = leaky_relu_slope
+
+ # specific to Code Hifi-Gan
+ self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size
+ self.unit_embed_dim = unit_embed_dim
+ self.lang_embed_dim = lang_embed_dim
+ self.spkr_embed_dim = spkr_embed_dim
+ self.vocoder_num_langs = vocoder_num_langs
+ self.vocoder_num_spkrs = vocoder_num_spkrs
+ self.variance_predictor_kernel_size = variance_predictor_kernel_size
+ self.var_pred_dropout = var_pred_dropout
+ self.vocoder_offset = vocoder_offset
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ decoder_start_token_id=decoder_start_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ max_position_embeddings=max_position_embeddings,
+ **kwargs,
+ )
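+
+
+# Illustrative configuration sketch (not part of the library source; the parameter names are the ones
+# documented above, the override values are arbitrary):
+#
+# >>> from transformers import SeamlessM4Tv2Config
+# >>> config = SeamlessM4Tv2Config(encoder_layers=12, decoder_layers=12)
+# >>> config.num_hidden_layers  # mirrors `decoder_layers`, see the "for proper config init" comment above
+# 12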
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d4320cff5bd9bb80670b0ee7db752fdf6f38e60
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py
@@ -0,0 +1,405 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Converting Meta SeamlessM4Tv2 checkpoints from seamless_communication to HF."""
+
+
+import argparse
+import os
+from pathlib import Path
+
+import torch
+from accelerate.utils.modeling import find_tied_parameters
+from seamless_communication.inference import Translator
+
+from transformers import (
+ SeamlessM4TFeatureExtractor,
+ SeamlessM4TProcessor,
+ SeamlessM4TTokenizer,
+ SeamlessM4Tv2Config,
+ SeamlessM4Tv2Model,
+)
+from transformers.utils import logging
+
+
+# fmt: off
+UNIT_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kan__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tam__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__", ]
+# fmt: on
+
+# fmt: off
+VOCODER_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__",]
+# fmt: on
+
+# fmt: off
+LARGE_SUPPORTED_LANGUAGES = ["afr","amh","arb","ary","arz","asm","azj","bel","ben","bos","bul","cat","ceb","ces","ckb","cmn","cmn_Hant","cym","dan","deu","ell","eng","est","eus","fin","fra","fuv","gaz","gle","glg","guj","heb","hin","hrv","hun","hye","ibo","ind","isl","ita","jav","jpn","kan","kat","kaz","khk","khm","kir","kor","lao","lit","lug","luo","lvs","mai","mal","mar","mkd","mlt","mni","mya","nld","nno","nob","npi","nya","ory","pan","pbt","pes","pol","por","ron","rus","sat","slk","slv","sna","snd","som","spa","srp","swe","swh","tam","tel","tgk","tgl","tha","tur","ukr","urd","uzn","vie","yor","yue","zlm","zul",]
+# fmt: on
+
+
+def assert_param_count(model_1, model_2):
+ count_1 = sum(p[1].numel() for p in model_1.named_parameters() if "final_proj" not in p[0])
+ count_2 = sum(p[1].numel() for p in model_2.named_parameters() if "final_proj" not in p[0])
+ assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}"
+
+
+def param_count(model):
+ return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0])
+
+
+def _grab_best_device(use_gpu=True):
+ if torch.cuda.device_count() > 0 and use_gpu:
+ device = "cuda"
+ else:
+ device = "cpu"
+ return torch.device(device)
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+vocoder_convert_list = [
+ ("ups", "hifi_gan.upsampler"),
+ ("conv_pre", "hifi_gan.conv_pre"),
+ ("resblocks", "hifi_gan.resblocks"),
+ ("conv_post", "hifi_gan.conv_post"),
+ ("lang", "language_embedding"),
+ ("spkr", "speaker_embedding"),
+ ("dict.", "unit_embedding."),
+ ("dur_predictor.conv1.0", "dur_predictor.conv1"),
+ ("dur_predictor.conv2.0", "dur_predictor.conv2"),
+]
+
+# order is important: more specific patterns (e.g. "self_attn.output_proj") must come before the shorter patterns they contain (e.g. "output_proj")
+wav2vec_convert_list = [
+ ("speech_encoder_frontend.model_dim_proj", "feature_projection.projection"),
+ ("speech_encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"),
+ ("speech_encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"),
+ ("speech_encoder.inner.layers", "encoder.layers"),
+ ("speech_encoder.inner_layer_norm", "encoder.layer_norm"),
+ ("speech_encoder.adaptor_layers", "adapter.layers"),
+ ("inner_proj", "intermediate_dense"),
+ ("self_attn.output_proj", "self_attn.linear_out"),
+ ("output_proj", "output_dense"),
+ ("self_attn.k_proj", "self_attn.linear_k"),
+ ("self_attn.v_proj", "self_attn.linear_v"),
+ ("self_attn.q_proj", "self_attn.linear_q"),
+ ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"),
+ ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"),
+ ("self_attn.sdpa.rel_k_embed", "self_attn.distance_embedding"),
+ ("self_attn.sdpa.r_proj", "self_attn.linear_pos"),
+ ("conv.pointwise_conv1", "conv_module.pointwise_conv1"),
+ ("conv.pointwise_conv2", "conv_module.pointwise_conv2"),
+ ("conv.depthwise_conv", "conv_module.depthwise_conv"),
+ ("conv.batch_norm", "conv_module.batch_norm"),
+ ("conv.layer_norm", "conv_module.depthwise_layer_norm"),
+ ("conv_layer_norm", "conv_module.layer_norm"),
+ ("speech_encoder.proj1", "intermediate_ffn.intermediate_dense"),
+ ("speech_encoder.proj2", "intermediate_ffn.output_dense"),
+ ("speech_encoder.layer_norm", "inner_layer_norm"),
+]
+
+t2u_convert_list = [
+ ("t2u_model.final_proj", "lm_head"),
+ ("t2u_model.", "model."),
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
+ ("encoder_decoder_attn", "cross_attention"),
+ ("linear_k", "k_proj"),
+ ("linear_v", "v_proj"),
+ ("linear_q", "q_proj"),
+ ("ffn.inner_proj", "ffn.fc1"),
+ ("ffn.output_proj", "ffn.fc2"),
+ ("output_proj", "out_proj"),
+ ("decoder_frontend.embed_char", "decoder.embed_char"),
+ ("decoder_frontend.pos_emb_alpha_char", "decoder.pos_emb_alpha_char"),
+ ("decoder_frontend.embed", "decoder.embed_tokens"),
+ ("decoder_frontend.pos_emb_alpha", "decoder.pos_emb_alpha"),
+ ("conv1d.conv", "conv"),
+ ("conv1d_layer_norm", "conv_layer_norm"),
+ ("decoder_frontend.variance_adaptor", "decoder"),
+ ("duration_predictor.conv1.0", "duration_predictor.conv1"),
+ ("duration_predictor.conv2.0", "duration_predictor.conv2"),
+]
+
+text_convert_list = [
+ ("text_encoder.", ""),
+ ("text_decoder.", ""),
+ ("text_encoder_frontend.embed", "embed_tokens"),
+ ("text_decoder_frontend.embed", "embed_tokens"),
+ ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"),
+ ("encoder_decoder_attn", "cross_attention"),
+ ("linear_k", "k_proj"),
+ ("linear_v", "v_proj"),
+ ("linear_q", "q_proj"),
+ ("ffn.inner_proj", "ffn.fc1"),
+ ("ffn.output_proj", "ffn.fc2"),
+ ("output_proj", "out_proj"),
+ ("final_proj", "lm_head"),
+]
+
+CUR_PATH = os.path.dirname(os.path.abspath(__file__))
+default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
+CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "huggingface", "hub")
+
+
+def _load_hf_config():
+ return SeamlessM4Tv2Config()
+
+
+def _convert_model(
+ original_model,
+ hf_model,
+ convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict="speech",
+ exclude_state_dict=None,
+):
+ state_dict = original_model.state_dict()
+
+ # filter func
+ if isinstance(filter_state_dict, str):
+
+        def filter_func(item):
+            return filter_state_dict in item[0]
+
+ else:
+
+ def filter_func(item):
+ if exclude_state_dict is not None and exclude_state_dict in item[0]:
+ return False
+ for filter_el in filter_state_dict:
+ if filter_el in item[0]:
+ return True
+
+ return False
+
+ state_dict = dict(filter(filter_func, state_dict.items()))
+
+ for k, v in list(state_dict.items()):
+ new_k = k[len(unwanted_prefix) :]
+ for old_layer_name, new_layer_name in convert_list:
+ if old_layer_name in new_k:
+ new_k = new_k.replace(old_layer_name, new_layer_name)
+
+ # must do it by hand
+ if ".layer_norm" in new_k and new_k.split(".layer_norm")[0][-1].isnumeric():
+ new_k = new_k.replace("layer_norm", "final_layer_norm")
+
+ state_dict[new_k] = state_dict.pop(k)
+
+    extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys())
+    missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys())
+    missing_keys = {k for k in missing_keys if "final_logits_bias" not in k}
+ if len(extra_keys) != 0:
+ raise ValueError(f"extra keys found: {extra_keys}")
+ if len(missing_keys) != 0:
+ raise ValueError(f"missing keys: {missing_keys}")
+ hf_model.load_state_dict(state_dict, strict=False)
+ n_params = param_count(hf_model)
+
+ logger.info(f"model loaded: {round(n_params/1e6,1)}M params")
+
+ hf_model.eval()
+ hf_model.to(device)
+ del state_dict
+
+ return hf_model
+
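+# Illustrative example of the renaming performed by `_convert_model`: assuming the original checkpoint holds a
+# key such as "model.speech_encoder.inner.layers.0.conv.depthwise_conv.weight" (hypothetical name), stripping
+# the "model." prefix and applying `wav2vec_convert_list` in order yields the HF key
+# "encoder.layers.0.conv_module.depthwise_conv.weight".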
+
+def load_model(save_dir, model_type, repo_id):
+ """
+ Meta SeamlessM4Tv2 is made of 8 main components:
+ - speech_encoder (#1) and speech_encoder_frontend (#2)
+ - t2u_model (#3)
+ - text_encoder (#4) and text_encoder_frontend (#5)
+    - text_decoder (#6) [and text_decoder_frontend (#5), which is identical to text_encoder_frontend]
+ - final_proj (#7)
+ - vocoder (#8)
+ """
+ device = _grab_best_device()
+ name = "seamlessM4T_v2_large"
+
+ original_model = Translator(name, "vocoder_v2", device, dtype=torch.float32)
+
+ ######### TOKENIZER
+
+ langs = LARGE_SUPPORTED_LANGUAGES
+ langs = [f"__{lang}__" for lang in langs]
+ vocab_file = os.path.join(os.path.expanduser("~"), "tokenizer", model_type, "tokenizer.model")
+
+ save_dir = os.path.join(save_dir, name)
+ Path(save_dir).mkdir(exist_ok=True)
+
+ tokenizer = SeamlessM4TTokenizer(vocab_file, additional_special_tokens=langs)
+
+ sanity_check_lang_id = tokenizer.convert_tokens_to_ids("__fra__")
+
+ tokenizer.save_pretrained(save_dir)
+ tokenizer = SeamlessM4TTokenizer.from_pretrained(save_dir)
+
+ if sanity_check_lang_id != tokenizer.convert_tokens_to_ids("__fra__"):
+ raise ValueError(
+ f"Error in tokenizer saving/loading - __fra__ lang id is not coherent: {sanity_check_lang_id} vs {tokenizer.convert_tokens_to_ids('__fra__')}"
+ )
+
+ ####### get language to ids dict
+ text_decoder_lang_code_to_id = {lang.replace("__", ""): tokenizer.convert_tokens_to_ids(lang) for lang in langs}
+    # offset: unit vocab size (10000) + 5 special tokens (EOS/PAD/BOS/UNK/MSK) + len(UNIT_SUPPORTED_LANGUAGES)
+ t2u_lang_code_to_id = {
+ code.replace("__", ""): i + 10005 + len(UNIT_SUPPORTED_LANGUAGES)
+ for i, code in enumerate(UNIT_SUPPORTED_LANGUAGES)
+ }
+ vocoder_lang_code_to_id = {code.replace("__", ""): i for i, code in enumerate(VOCODER_SUPPORTED_LANGUAGES)}
+
+ ######### FE
+
+ fe = SeamlessM4TFeatureExtractor(language_code=langs)
+
+ fe.save_pretrained(save_dir)
+ fe = SeamlessM4TFeatureExtractor.from_pretrained(save_dir)
+
+ processor = SeamlessM4TProcessor(feature_extractor=fe, tokenizer=tokenizer)
+ processor.save_pretrained(save_dir)
+ processor.push_to_hub(repo_id=repo_id, create_pr=True)
+
+ processor = SeamlessM4TProcessor.from_pretrained(save_dir)
+
+ ######## Model
+
+ # init config
+ hf_config = _load_hf_config()
+
+ ######## get id_to_text and char_to_id from original model tokenizers
+ id_to_text = {i: original_model.text_tokenizer.model.index_to_token(i) for i in range(hf_config.vocab_size)}
+ char_to_id = {
+ original_model.model.t2u_model.decoder_frontend.char_tokenizer.model.index_to_token(i): i for i in range(10904)
+ }
+
+ # init model
+ hf_model = SeamlessM4Tv2Model(hf_config)
+
+ hf_model.generation_config.__setattr__("text_decoder_lang_to_code_id", text_decoder_lang_code_to_id)
+ hf_model.generation_config.__setattr__("t2u_lang_code_to_id", t2u_lang_code_to_id)
+ hf_model.generation_config.__setattr__("vocoder_lang_code_to_id", vocoder_lang_code_to_id)
+ hf_model.generation_config.__setattr__("id_to_text", id_to_text)
+ hf_model.generation_config.__setattr__("char_to_id", char_to_id)
+
+ # -1. take care of vocoder
+ # similarly to speech T5 must apply and remove weight norm
+ hf_model.vocoder.apply_weight_norm()
+ hf_model.vocoder = _convert_model(
+ original_model,
+ hf_model.vocoder,
+ vocoder_convert_list,
+ device,
+ unwanted_prefix="vocoder.code_generator.",
+ filter_state_dict="vocoder",
+ )
+ hf_model.vocoder.remove_weight_norm()
+
+ # 1. take care of speech encoder
+ wav2vec = hf_model.speech_encoder
+ hf_model.speech_encoder = _convert_model(
+ original_model, wav2vec, wav2vec_convert_list, device, unwanted_prefix="model.", filter_state_dict="speech"
+ )
+
+ # 2. take care of t2u
+
+ hf_model.t2u_model = _convert_model(
+ original_model,
+ hf_model.t2u_model,
+ t2u_convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict="t2u_model",
+ )
+
+ # 3. take care of text encoder
+ hf_model.text_encoder = _convert_model(
+ original_model,
+ hf_model.text_encoder,
+ text_convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict=["model.text_encoder"],
+ exclude_state_dict="t2u_model",
+ )
+
+ # 4. take care of text decoder
+ hf_model.text_decoder = _convert_model(
+ original_model,
+ hf_model.text_decoder,
+ text_convert_list,
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict=["model.text_decoder"],
+ exclude_state_dict="t2u_model",
+ )
+
+ # 5. take care of final proj
+ hf_model.lm_head = _convert_model(
+ original_model,
+ hf_model.lm_head,
+ [("final_proj.", "")],
+ device,
+ unwanted_prefix="model.",
+ filter_state_dict=["model.final_proj"],
+ exclude_state_dict="t2u_model",
+ )
+
+ # sanity check
+ print(find_tied_parameters(hf_model))
+
+ count_1 = param_count(hf_model)
+ count_2 = param_count(original_model)
+
+ print(f"HF MODEL:{count_1}, ORIGINAL_MODEL: {count_2}, diff:{count_1 - count_2}")
+ print(f"HF MODEL excluding embeddings:{hf_model.num_parameters(exclude_embeddings=True)}")
+
+ del original_model
+
+ hf_model.generation_config._from_model_config = False
+ hf_model.save_pretrained(save_dir)
+ hf_model.push_to_hub(repo_id=repo_id, create_pr=True)
+ hf_model = SeamlessM4Tv2Model.from_pretrained(save_dir)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+
+ parser.add_argument(
+ "--model_type",
+ default="large",
+ type=str,
+ help="Model type.",
+ )
+
+ parser.add_argument(
+ "--save_dir",
+ default="/home/ubuntu/weights_v2",
+ type=str,
+ help="Path to the output PyTorch model.",
+ )
+
+ parser.add_argument(
+ "--repo_id",
+ default="facebook/seamless-m4t-v2-large",
+ type=str,
+ help="Repo ID.",
+ )
+
+ args = parser.parse_args()
+
+ load_model(args.save_dir, args.model_type, args.repo_id)
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd64051f6c57b7e85bf75ac862dd3e8c063de58c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
@@ -0,0 +1,4797 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch SeamlessM4Tv2 model."""
+
+
+import copy
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import Tensor, nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+ Wav2Vec2BaseModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = ""
+_CONFIG_FOR_DOC = "SeamlessM4Tv2Config"
+
+SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "facebook/seamless-m4t-v2-large",
+ # See all SeamlessM4T-v2 models at https://huggingface.co/models?filter=seamless_m4t_v2
+]
+
+
+SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP = {
+ "microsoft/speecht5_hifigan": "https://huggingface.co/microsoft/speecht5_hifigan/resolve/main/config.json",
+}
+
+
+@dataclass
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TGenerationOutput with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2GenerationOutput(ModelOutput):
+ """
+ Class defining the generated outputs from [`SeamlessM4Tv2Model`], [`SeamlessM4Tv2ForTextToText`],
+    [`SeamlessM4Tv2ForTextToSpeech`], [`SeamlessM4Tv2ForSpeechToSpeech`] and [`SeamlessM4Tv2ForSpeechToText`].
+
+ Args:
+ waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ The final audio waveform predicted by the model.
+ waveform_lengths (`torch.IntTensor` of shape `(batch_size,)`, *optional*):
+ The length in samples of each element in the `waveform` batch.
+ sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ The generated translated sequences. This is the output of the text-to-text or the speech-to-text models.
+ The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished
+ early due to the `eos_token_id`.
+ unit_sequences (`torch.LongTensor` of shape `(batch_size, unit_sequence_length)`, *optional*):
+ The generated translated unit sequences. This is the output of the text-to-units model. The second
+ dimension (unit_sequence_length) is either equal to `t2u_max_length` or shorter if all batches finished
+ early due to the `t2u_eos_token_id`.
+ """
+
+ waveform: Optional[torch.FloatTensor] = None
+ waveform_lengths: Optional[torch.IntTensor] = None
+ sequences: Optional[Tuple[torch.FloatTensor]] = None
+ unit_sequences: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class SeamlessM4Tv2TextToUnitDecoderOutput(ModelOutput):
+ """
+ Class defining the outputs from [`SeamlessM4Tv2TextToUnitDecoder`].
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
+            for *masked*.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ padding_mask: Optional[torch.Tensor] = None
+
+
+@dataclass
+class SeamlessM4Tv2TextToUnitOutput(ModelOutput):
+ """
+ Class defining the outputs from [`SeamlessM4Tv2TextToUnitForConditionalGeneration`] and
+ [`SeamlessM4Tv2TextToUnitModel`].
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0
+            for *masked*.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ padding_mask: Optional[torch.Tensor] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ loss: Optional[torch.FloatTensor] = None
+
+
+SEAMLESS_M4T_V2_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`~SeamlessM4Tv2Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+SEAMLESS_M4T_V2_MULTIMODAL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
+            Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+ """
+
+M4T_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ """
+
+M4T_SPEECH_INPUTS_DOCSTRING = r"""
+ Args:
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
+            Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+ """
+
+SEAMLESS_M4T_V2_END_INPUTS_DOCSTRING = r"""
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape`(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+M4T_MODEL_INPUTS_DOCSTRING = SEAMLESS_M4T_V2_MULTIMODAL_INPUTS_DOCSTRING + SEAMLESS_M4T_V2_END_INPUTS_DOCSTRING
+
+M4T_TEXT_INPUTS_DOCSTRING = M4T_TEXT_INPUTS_DOCSTRING + SEAMLESS_M4T_V2_END_INPUTS_DOCSTRING
+
+M4T_SPEECH_INPUTS_DOCSTRING = M4T_SPEECH_INPUTS_DOCSTRING + SEAMLESS_M4T_V2_END_INPUTS_DOCSTRING
+
+M4T_TEXT_TO_UNITS_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ char_input_ids (`torch.LongTensor` of shape `(batch_size, char_sequence_length)`):
+ Character indices. The correspondence between characters and indices can be found in `char_to_id`, a
+ dictionary in the generation configuration.
+ char_count_per_id (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Number of characters per input id.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ inputs_embeds (`torch.FloatTensor` of shape`(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+############ UTILS ################
+
+
+# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+        input_ids: torch.Tensor
+        padding_idx: int
+        past_key_values_length: int
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
+
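+# For example, with `padding_idx=1`, `input_ids = [[7, 8, 1, 1]]` gives `mask = [[1, 1, 0, 0]]` and
+# `cumsum(mask) * mask = [[1, 2, 0, 0]]`, hence position ids `[[2, 3, 1, 1]]`: real tokens are numbered from
+# `padding_idx + 1` onwards while padded positions keep `padding_idx`.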
+
+# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
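+# For example, with `decoder_start_token_id=2` and `pad_token_id=0`, labels `[[5, 6, -100]]` are shifted to
+# `[[2, 5, 6]]`, while labels `[[5, -100, -100]]` become `[[2, 5, 0]]` (the `-100` that remains after the
+# shift is replaced by the pad token).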
+
+def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor):
+ """
+ Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that
+ stops at the corresponding element in `seq_lens`.
+
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`):
+ The sequences to mask, where `*` is any number of sequence-specific dimensions including none.
+        seq_lens (`torch.Tensor` of shape `(batch)`):
+            Each element represents the length of the sequence at the same index in `hidden_states`.
+
+ Returns:
+ `torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)`
+ """
+ batch_size, mask_seq_len = hidden_states.shape[:2]
+
+ indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1)
+
+ bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len)
+
+ mask = hidden_states.new_ones((batch_size, mask_seq_len))
+
+ mask = mask.masked_fill(bool_mask, 0)
+
+ return mask
+
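+# For example, for hidden states of shape `(2, 5, hidden_size)` and `seq_lens = [2, 4]`, the returned mask is
+# `[[1, 1, 0, 0, 0], [1, 1, 1, 1, 0]]`.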
+
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.format_speech_generation_kwargs with SeamlessM4T->SeamlessM4Tv2
+def format_speech_generation_kwargs(kwargs):
+ """
+    Format kwargs for SeamlessM4Tv2 models that generate speech, attributing each kwarg to either the text
+    generation or the speech generation model.
+
+ Args:
+        kwargs (`dict`):
+ Keyword arguments are of two types:
+
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
+ except for `decoder_input_ids` which will only be passed through the text components.
+            - With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
+              text model and the speech model respectively. They take priority over the keywords without a prefix.
+
+ This means you can, for example, specify a generation strategy for one generation but not for the
+ other.
+ """
+ # attribute kwargs to models
+ kwargs_text = {}
+ kwargs_speech = {}
+ for key, value in kwargs.items():
+ if key.startswith("text_"):
+ key = key[len("text_") :]
+ kwargs_text[key] = value
+ elif key.startswith("speech_"):
+ key = key[len("speech_") :]
+ kwargs_speech[key] = value
+ else:
+ # If the key is already in a specific config, then it's been set with a
+            # submodule-specific value and we don't override it
+ if key not in kwargs_text:
+ kwargs_text[key] = value
+ if key not in kwargs_speech:
+ kwargs_speech[key] = value
+ return kwargs_text, kwargs_speech
+
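+# For example, `{"max_new_tokens": 50, "text_num_beams": 5, "speech_do_sample": True}` is split into
+# `kwargs_text = {"max_new_tokens": 50, "num_beams": 5}` and
+# `kwargs_speech = {"max_new_tokens": 50, "do_sample": True}`.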
+
+############ SPEECH ENCODER related code ################
+
+
+class SeamlessM4Tv2ConformerFeatureProjection(nn.Module):
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerFeatureProjection.__init__
+ def __init__(self, config):
+ super().__init__()
+ self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps)
+ self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size)
+ self.dropout = nn.Dropout(config.speech_encoder_dropout)
+
+ def forward(self, hidden_states):
+ # non-projected hidden states are needed for quantization
+ norm_hidden_states = self.layer_norm(hidden_states.to(self.layer_norm.weight.dtype))
+ hidden_states = self.projection(norm_hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerFeedForward with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2ConformerFeedForward(nn.Module):
+ def __init__(self, config, act_fn=None, dropout=None):
+ super().__init__()
+ dropout = dropout if dropout is not None else config.speech_encoder_dropout
+ act_fn = act_fn if act_fn is not None else config.speech_encoder_hidden_act
+
+ self.intermediate_dropout = nn.Dropout(dropout)
+ self.intermediate_dense = nn.Linear(config.hidden_size, config.speech_encoder_intermediate_size)
+ self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn
+
+ self.output_dense = nn.Linear(config.speech_encoder_intermediate_size, config.hidden_size)
+ self.output_dropout = nn.Dropout(dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate_dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.intermediate_dropout(hidden_states)
+
+ hidden_states = self.output_dense(hidden_states)
+ hidden_states = self.output_dropout(hidden_states)
+ return hidden_states
+
+
+class SeamlessM4Tv2ConformerConvolutionModule(nn.Module):
+ """Convolution block used in the conformer block. Uses a causal depthwise convolution similar to that
+    described in Section 2.1 of `https://doi.org/10.48550/arxiv.1609.03499`."""
+
+ def __init__(self, config):
+ super().__init__()
+ if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
+            raise ValueError("`config.conv_depthwise_kernel_size` should be an odd number for 'SAME' padding")
+ self.layer_norm = nn.LayerNorm(config.hidden_size)
+ self.pointwise_conv1 = nn.Conv1d(
+ config.hidden_size,
+ 2 * config.hidden_size,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False,
+ )
+ self.glu = nn.GLU(dim=1)
+ self.depthwise_conv = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ config.conv_depthwise_kernel_size,
+ stride=1,
+ padding=0,
+ groups=config.hidden_size,
+ bias=False,
+ )
+ self.depthwise_layer_norm = nn.LayerNorm(config.hidden_size)
+ self.activation = ACT2FN[config.speech_encoder_hidden_act]
+ self.pointwise_conv2 = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False,
+ )
+ self.dropout = nn.Dropout(config.speech_encoder_dropout)
+
+ def forward(self, hidden_states, attention_mask=None):
+ hidden_states = self.layer_norm(hidden_states)
+
+ # Ensure that we do not leak padded positions in depthwise convolution.
+ # Put 0 where necessary
+ if attention_mask is not None:
+ hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
+
+ # exchange the temporal dimension and the feature dimension
+ hidden_states = hidden_states.transpose(1, 2)
+
+ # GLU mechanism
+ # => (batch, 2*channel, dim)
+ hidden_states = self.pointwise_conv1(hidden_states)
+ # => (batch, channel, dim)
+ hidden_states = self.glu(hidden_states)
+
+ # Pad the sequence entirely on the left because of causal convolution.
+ hidden_states = torch.nn.functional.pad(hidden_states, (self.depthwise_conv.kernel_size[0] - 1, 0))
+
+ # 1D Depthwise Conv
+ hidden_states = self.depthwise_conv(hidden_states)
+ hidden_states = self.depthwise_layer_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+ hidden_states = self.activation(hidden_states)
+
+ hidden_states = self.pointwise_conv2(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = hidden_states.transpose(1, 2)
+ return hidden_states
+
+
+class SeamlessM4Tv2ConformerSelfAttention(nn.Module):
+ """Construct a SeamlessM4Tv2ConformerSelfAttention object.
+ Can be enhanced with relative position embeddings.
+ """
+
+ def __init__(self, config, use_position_embeddings=True):
+ super().__init__()
+
+ self.head_size = config.hidden_size // config.speech_encoder_attention_heads
+ self.num_heads = config.speech_encoder_attention_heads
+ self.position_embeddings_type = config.position_embeddings_type if use_position_embeddings else None
+
+ self.linear_q = nn.Linear(config.hidden_size, config.hidden_size)
+ self.linear_k = nn.Linear(config.hidden_size, config.hidden_size)
+ self.linear_v = nn.Linear(config.hidden_size, config.hidden_size)
+ self.linear_out = nn.Linear(config.hidden_size, config.hidden_size)
+
+ self.dropout = nn.Dropout(p=config.speech_encoder_dropout)
+
+ if self.position_embeddings_type == "relative_key":
+ self.left_max_position_embeddings = config.left_max_position_embeddings
+ self.right_max_position_embeddings = config.right_max_position_embeddings
+ num_positions = self.left_max_position_embeddings + self.right_max_position_embeddings + 1
+ self.distance_embedding = nn.Embedding(num_positions, self.head_size)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # self-attention mechanism
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+
+ # make sure query/key states can be != value states
+ query_key_states = hidden_states
+ value_states = hidden_states
+
+ # project query_key_states and value_states
+ query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
+ key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
+ value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)
+
+ # => (batch, head, time1, d_k)
+ query = query.transpose(1, 2)
+ key = key.transpose(1, 2)
+ value = value.transpose(1, 2)
+
+ attn_weights = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)
+
+ if self.position_embeddings_type == "relative_key":
+ query_length, key_length = query.shape[2], key.shape[2]
+
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_r - position_ids_l
+ distance = torch.clamp(distance, -self.left_max_position_embeddings, self.right_max_position_embeddings)
+
+ positional_embedding = self.distance_embedding(distance + self.left_max_position_embeddings)
+ positional_embedding = positional_embedding.to(dtype=query.dtype) # fp16 compatibility
+
+ relative_position_attn_weights = torch.einsum("bhld,lrd->bhlr", query, positional_embedding)
+ attn_weights = attn_weights + (relative_position_attn_weights / math.sqrt(self.head_size))
+
+ # apply attention_mask if necessary
+ if attention_mask is not None:
+ attn_weights = attn_weights + attention_mask
+
+ # => (batch, head, time1, time2)
+ attn_weights = torch.softmax(attn_weights, dim=-1)
+ attn_weights = self.dropout(attn_weights)
+
+ # => (batch, head, time1, d_k)
+ attn_output = torch.matmul(attn_weights, value)
+
+ # => (batch, time1, hidden_size)
+ attn_output = attn_output.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
+ attn_output = self.linear_out(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights
+
+
+class SeamlessM4Tv2ConformerEncoderLayer(nn.Module):
+ """Conformer block based on https://arxiv.org/abs/2005.08100."""
+
+ # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerEncoderLayer.__init__ with Wav2Vec2->SeamlessM4Tv2, attention_dropout->speech_encoder_dropout, torch.nn->nn
+ def __init__(self, config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ dropout = config.speech_encoder_dropout
+
+ # Feed-forward 1
+ self.ffn1_layer_norm = nn.LayerNorm(embed_dim)
+ self.ffn1 = SeamlessM4Tv2ConformerFeedForward(config)
+
+ # Self-Attention
+ self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
+ self.self_attn_dropout = nn.Dropout(dropout)
+ self.self_attn = SeamlessM4Tv2ConformerSelfAttention(config)
+
+ # Conformer Convolution
+ self.conv_module = SeamlessM4Tv2ConformerConvolutionModule(config)
+
+ # Feed-forward 2
+ self.ffn2_layer_norm = nn.LayerNorm(embed_dim)
+ self.ffn2 = SeamlessM4Tv2ConformerFeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(embed_dim)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ conv_attention_mask: Optional[torch.Tensor] = None,
+ ):
+
+ # 1. Feed-Forward 1 layer
+ residual = hidden_states
+ hidden_states = self.ffn1_layer_norm(hidden_states)
+ hidden_states = self.ffn1(hidden_states)
+ hidden_states = hidden_states * 0.5 + residual
+ residual = hidden_states
+
+ # 2. Self-Attention layer
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.self_attn_dropout(hidden_states)
+ hidden_states = hidden_states + residual
+
+ # 3. Convolutional Layer
+ residual = hidden_states
+ hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask)
+ hidden_states = residual + hidden_states
+
+ # 4. Feed-Forward 2 Layer
+ residual = hidden_states
+ hidden_states = self.ffn2_layer_norm(hidden_states)
+ hidden_states = self.ffn2(hidden_states)
+ hidden_states = hidden_states * 0.5 + residual
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ return hidden_states, attn_weights
+
+
+class SeamlessM4Tv2ConformerEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ self.dropout = nn.Dropout(config.speech_encoder_dropout)
+ self.layers = nn.ModuleList(
+ [SeamlessM4Tv2ConformerEncoderLayer(config) for _ in range(config.speech_encoder_layers)]
+ )
+
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.gradient_checkpointing = False
+
+ def _apply_chunk_attention(self, attention_mask, hidden_states):
+ """
+        Creates a chunk attention mask that prevents attention across chunks, ensuring that each position attends
+        only to positions within its own chunk. If a left chunk overlap is specified
+        (`speech_encoder_left_chunk_num` in the configuration), the attention mask is adjusted so that each
+        position can also attend to the `speech_encoder_left_chunk_num` previous chunks.
+ """
+ sequence_len = hidden_states.shape[1]
+
+ chunk_indices = torch.arange(sequence_len, device=hidden_states.device)
+ chunk_indices = torch.div(chunk_indices, self.config.speech_encoder_chunk_size).long()
+
+ start_indices = torch.full_like(chunk_indices, 0)
+ if self.config.speech_encoder_left_chunk_num >= 0:
+ start_indices = (chunk_indices - self.config.speech_encoder_left_chunk_num).clamp_(min=0)
+ start_indices = start_indices * self.config.speech_encoder_chunk_size
+ start_indices = start_indices.unsqueeze(1).expand(-1, sequence_len)
+
+ end_indices = ((chunk_indices + 1) * self.config.speech_encoder_chunk_size).clamp_(max=sequence_len)
+
+ end_indices = end_indices.unsqueeze(1).expand(-1, sequence_len)
+
+ indices = torch.arange(sequence_len, device=hidden_states.device).unsqueeze(0).expand(sequence_len, -1)
+
+ chunk_mask = (indices < start_indices) | (indices >= end_indices)
+ chunk_mask = chunk_mask.unsqueeze(0).unsqueeze(0)
+
+ attention_mask = chunk_mask if attention_mask is None else (attention_mask.bool() | chunk_mask)
+ attention_mask = attention_mask.to(dtype=hidden_states.dtype)
+ return attention_mask
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ conv_attention_mask = attention_mask
+ if attention_mask is not None:
+ # make sure padded tokens output 0
+ hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0)
+ # extend attention_mask
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
+ attention_mask = attention_mask.expand(
+ attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
+ )
+
+ if self.config.speech_encoder_chunk_size is not None:
+ attention_mask = self._apply_chunk_attention(attention_mask, hidden_states)
+
+ if attention_mask is not None:
+ attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
+
+ hidden_states = self.dropout(hidden_states)
+
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ for i, layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+
+            skip_the_layer = bool(
+                self.training and (dropout_probability < self.config.speech_encoder_layerdrop)
+            )
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer.__call__,
+ hidden_states,
+ attention_mask,
+ )
+ else:
+ layer_outputs = layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ conv_attention_mask=conv_attention_mask,
+ )
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerAdapterLayer with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2ConformerAdapterLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ dropout = config.adaptor_dropout
+
+ self.kernel_size = config.adaptor_kernel_size
+ self.stride = config.adaptor_stride
+
+ # 1. residual convolution
+ self.residual_layer_norm = nn.LayerNorm(embed_dim)
+ self.residual_conv = nn.Conv1d(
+ embed_dim,
+ 2 * embed_dim,
+ self.kernel_size,
+ stride=self.stride,
+ padding=self.stride // 2,
+ )
+ self.activation = nn.GLU(dim=1)
+
+ # Self-Attention
+ self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
+ self.self_attn_conv = nn.Conv1d(
+ embed_dim,
+ 2 * embed_dim,
+ self.kernel_size,
+ stride=self.stride,
+ padding=self.stride // 2,
+ )
+ self.self_attn = SeamlessM4Tv2ConformerSelfAttention(config, use_position_embeddings=False)
+ self.self_attn_dropout = nn.Dropout(dropout)
+
+ # Feed-forward
+ self.ffn_layer_norm = nn.LayerNorm(embed_dim)
+ self.ffn = SeamlessM4Tv2ConformerFeedForward(config, act_fn="relu", dropout=dropout)
+
+ def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
+ pad = self.kernel_size // 2
+ seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
+
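+        # Standard 1d-convolution output-length formula: floor((length + 2 * pad - kernel_size) / stride + 1).
+        # With illustrative values kernel_size=8 and stride=8, 100 non-padded steps map to
+        # floor((100 + 8 - 8) / 8 + 1) = 13 sub-sampled steps.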
+ seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1
+
+ return seq_lens.floor()
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ):
+ residual = self.residual_layer_norm(hidden_states)
+
+ # Apply pooling to the residual to match the sequence length of the
+ # multi-head attention output.
+ # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len)
+ residual = residual.transpose(1, 2)
+ residual = self.residual_conv(residual)
+ residual = self.activation(residual)
+ # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim)
+ residual = residual.transpose(1, 2)
+
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ # Apply pooling before feeding to the multihead-attention layer.
+ # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len)
+ hidden_states = hidden_states.transpose(1, 2)
+ hidden_states = self.self_attn_conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim)
+ hidden_states = hidden_states.transpose(1, 2)
+
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ hidden_states.device
+ )
+ attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths)
+ attention_mask = _prepare_4d_attention_mask(
+ attention_mask,
+ hidden_states.dtype,
+ )
+
+ # The rest of the computation is identical to a vanilla Transformer
+ # encoder layer.
+        hidden_states, attn_weights = self.self_attn(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.self_attn_dropout(hidden_states)
+ hidden_states = hidden_states + residual
+
+ residual = hidden_states
+
+ hidden_states = self.ffn_layer_norm(hidden_states)
+ hidden_states = self.ffn(hidden_states) + residual
+
+ return hidden_states
+
+
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerAdapter with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2ConformerAdapter(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ self.layers = nn.ModuleList(
+ SeamlessM4Tv2ConformerAdapterLayer(config) for _ in range(config.num_adapter_layers)
+ )
+
+ def forward(self, hidden_states, attention_mask):
+ # down project hidden_states if necessary
+
+ for layer in self.layers:
+ hidden_states = layer(hidden_states, attention_mask)
+
+ return hidden_states
+
+
+############ TEXT / UNITS related code ################
+
+
+# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding
+class SeamlessM4Tv2SinusoidalPositionalEmbedding(nn.Module):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ super().__init__()
+ self.offset = 2
+ self.embedding_dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
+
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
+ if hasattr(self, "weights"):
+ # in forward put the weights on the correct dtype and device of the param
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
+
+ self.register_buffer("weights", emb_weights, persistent=False)
+
+ @staticmethod
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ """
+ Build sinusoidal embeddings.
+
+ This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
+ "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ if padding_idx is not None:
+ emb[padding_idx, :] = 0
+
+ return emb.to(torch.get_default_dtype())
+
+ @torch.no_grad()
+ def forward(
+ self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0
+ ):
+ if input_ids is not None:
+ bsz, seq_len = input_ids.size()
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
+ input_ids.device
+ )
+ else:
+ bsz, seq_len = inputs_embeds.size()[:-1]
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length)
+
+ # expand embeddings if needed
+ max_pos = self.padding_idx + 1 + seq_len + past_key_values_length
+ if max_pos > self.weights.size(0):
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
+
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach()
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
+
+
+class SeamlessM4Tv2Attention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ # Copied from transformers.models.bart.modeling_bart.BartAttention.__init__ with Bart->SeamlessM4Tv2
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[SeamlessM4Tv2Config] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, projection: torch.Tensor) -> torch.Tensor:
+ new_projection_shape = projection.size()[:-1] + (self.num_heads, self.head_dim)
+ # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D)
+ new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3)
+ return new_projection
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ is_cross_attention = encoder_hidden_states is not None
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ # use encoder_hidden_states if cross attention
+ current_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
+ # check that the `sequence_length` of the `past_key_value` matches that of the provided
+ # `encoder_hidden_states`, to support prefix tuning
+ if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ else:
+ key_states = self._shape(self.k_proj(current_states))
+ value_states = self._shape(self.v_proj(current_states))
+ if past_key_value is not None and not is_cross_attention:
+ # reuse k, v, self_attention
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
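+ # scale the queries by 1/sqrt(head_dim) before the dot product, which is equivalent to dividing
+ # the raw attention scores by sqrt(head_dim)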
+ query_states = self._shape(self.q_proj(hidden_states) * self.scaling)
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
+
+ if self.is_decoder:
+ # if cross-attention, save Tuple(torch.Tensor, torch.Tensor) of all cross-attention
+ # key/value_states so that subsequent calls to the cross-attention layer can reuse them
+ # if uni-directional self-attention (decoder), save Tuple(torch.Tensor, torch.Tensor) of all
+ # previous decoder key/value_states so that subsequent calls can concatenate them with the
+ # current projected key/value_states
+ # if encoder bi-directional self-attention, `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ if attention_mask is not None:
+ attention_scores = attention_scores + attention_mask
+
+ # (batch_size, n_heads, seq_length, key_length)
+ attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).type_as(attention_scores)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ # (batch_size, n_heads, seq_length, head_dim)
+ context_states = torch.matmul(attn_weights, value_states)
+ # (batch_size, n_heads, seq_length, head_dim) -> (batch_size, seq_length, embed_dim)
+ context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1)
+ attn_output = self.out_proj(context_states)
+
+ if output_attentions:
+ return attn_output, attn_weights, past_key_value
+ else:
+ return attn_output, None, past_key_value
+
+
+# Copied from transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeDenseActDense with NllbMoe->SeamlessM4Tv2,DenseActDense->FeedForwardNetwork, d_model->hidden_size
+class SeamlessM4Tv2FeedForwardNetwork(nn.Module):
+ def __init__(self, config: SeamlessM4Tv2Config, ffn_dim: int):
+ super().__init__()
+ self.fc1 = nn.Linear(config.hidden_size, ffn_dim)
+ self.fc2 = nn.Linear(ffn_dim, config.hidden_size)
+ self.dropout = nn.Dropout(config.activation_dropout)
+ self.act = ACT2FN[config.activation_function]
+
+ def forward(self, hidden_states):
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.dropout(hidden_states)
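+ # when `fc2` holds quantized (int8/uint8) weights, keep the activations in their current dtype;
+ # otherwise cast them to the weight dtype so mixed-precision inputs match the projection weights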
+ if (
+ isinstance(self.fc2.weight, torch.Tensor)
+ and hidden_states.dtype != self.fc2.weight.dtype
+ and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8)
+ ):
+ hidden_states = hidden_states.to(self.fc2.weight.dtype)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TEncoderLayer with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2EncoderLayer(nn.Module):
+ def __init__(self, config: SeamlessM4Tv2Config, encoder_ffn_dim=None, encoder_attention_heads=None):
+ super().__init__()
+ encoder_ffn_dim = config.encoder_ffn_dim if encoder_ffn_dim is None else encoder_ffn_dim
+ encoder_attention_heads = (
+ config.encoder_attention_heads if encoder_attention_heads is None else encoder_attention_heads
+ )
+
+ self.embed_dim = config.hidden_size
+ self.self_attn = SeamlessM4Tv2Attention(
+ embed_dim=self.embed_dim,
+ num_heads=encoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.attn_dropout = nn.Dropout(config.dropout)
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ self.ffn = SeamlessM4Tv2FeedForwardNetwork(config, ffn_dim=encoder_ffn_dim)
+
+ self.ffn_layer_norm = nn.LayerNorm(config.hidden_size)
+ self.ffn_dropout = nn.Dropout(config.activation_dropout)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ output_attentions: bool = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`):
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
+ large negative values.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.attn_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+
+ hidden_states = self.ffn_layer_norm(hidden_states)
+
+ hidden_states = self.ffn(hidden_states)
+ hidden_states = self.ffn_dropout(hidden_states)
+
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TDecoderLayer with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2DecoderLayer(nn.Module):
+ def __init__(self, config: SeamlessM4Tv2Config, decoder_ffn_dim=None, decoder_attention_heads=None):
+ super().__init__()
+ decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim
+ decoder_attention_heads = (
+ config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads
+ )
+
+ self.embed_dim = config.hidden_size
+ self.self_attn = SeamlessM4Tv2Attention(
+ embed_dim=self.embed_dim,
+ num_heads=decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.attn_dropout = nn.Dropout(config.dropout)
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.cross_attention = SeamlessM4Tv2Attention(
+ self.embed_dim, decoder_attention_heads, config.attention_dropout, is_decoder=True
+ )
+ self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ self.ffn = SeamlessM4Tv2FeedForwardNetwork(config, ffn_dim=decoder_ffn_dim)
+
+ self.ffn_layer_norm = nn.LayerNorm(config.hidden_size)
+ self.ffn_dropout = nn.Dropout(config.activation_dropout)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`):
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
+ large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`):
+ encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by
+ very large negative values.
+ past_key_value (`Tuple(torch.FloatTensor)`):
+ cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.attn_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.cross_attention_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention(
+ hidden_states=hidden_states,
+ encoder_hidden_states=encoder_hidden_states,
+ past_key_value=cross_attn_past_key_value,
+ attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.attn_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value += cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+
+ hidden_states = self.ffn_layer_norm(hidden_states)
+
+ hidden_states = self.ffn(hidden_states)
+ hidden_states = self.ffn_dropout(hidden_states)
+
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states, present_key_value)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ return outputs
+
+
+class SeamlessM4Tv2TextToUnitDecoderLayer(nn.Module):
+ def __init__(self, config: SeamlessM4Tv2Config, decoder_ffn_dim=None, decoder_attention_heads=None):
+ super().__init__()
+ decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim
+ decoder_attention_heads = (
+ config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads
+ )
+ self.dropout = config.dropout
+ self.embed_dim = config.hidden_size
+
+ self.self_attn = SeamlessM4Tv2Attention(
+ embed_dim=self.embed_dim,
+ num_heads=decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ self.conv1 = nn.Conv1d(self.embed_dim, self.embed_dim, kernel_size=7, stride=1, padding="same")
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.conv2 = nn.Conv1d(self.embed_dim, self.embed_dim, kernel_size=7, stride=1, padding="same")
+
+ self.conv_layer_norm = nn.LayerNorm(config.hidden_size)
+ self.conv_dropout = nn.Dropout(self.dropout)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ padding_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`):
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
+ large negative values.
+ padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*
+ or 0 for *masked*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Conv
+ residual = hidden_states
+
+ # Apply padding mask to avoid leaking padded positions in the convolution layer
+ if padding_mask is not None:
+ hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
+ hidden_states = self.conv1(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ if padding_mask is not None:
+ hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
+
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.conv2(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ hidden_states = self.conv_dropout(hidden_states)
+ hidden_states = residual + hidden_states
+ hidden_states = self.conv_layer_norm(hidden_states)
+
+ outputs = (hidden_states, present_key_value)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ return outputs
+
+
+############ SUB-MODELS related code ################
+
+
+class SeamlessM4Tv2PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = SeamlessM4Tv2Config
+ base_model_prefix = "seamless_m4t_v2"
+ supports_gradient_checkpointing = True
+ _no_split_modules = [
+ "SeamlessM4Tv2EncoderLayer",
+ "SeamlessM4Tv2DecoderLayer",
+ "SeamlessM4Tv2ConformerEncoderLayer",
+ "SeamlessM4Tv2TextToUnitDecoderLayer",
+ ]
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, SeamlessM4Tv2ConformerSelfAttention):
+ if hasattr(module, "pos_bias_u"):
+ nn.init.xavier_uniform_(module.pos_bias_u)
+ if hasattr(module, "pos_bias_v"):
+ nn.init.xavier_uniform_(module.pos_bias_v)
+ elif isinstance(module, SeamlessM4Tv2ConformerFeatureProjection):
+ k = math.sqrt(1 / module.projection.in_features)
+ nn.init.uniform_(module.projection.weight, a=-k, b=k)
+ nn.init.uniform_(module.projection.bias, a=-k, b=k)
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, (nn.Conv1d, nn.ConvTranspose1d)):
+ nn.init.kaiming_normal_(module.weight)
+ if module.bias is not None:
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
+ nn.init.uniform_(module.bias, a=-k, b=k)
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TPreTrainedModel._compute_sub_sample_lengths_from_attention_mask
+ def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
+ kernel_size, stride = self.config.adaptor_kernel_size, self.config.adaptor_stride
+ pad = kernel_size // 2
+ seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
+
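+ # standard 1d-convolution output-length formula: floor((seq_len + 2 * pad - kernel_size) / stride) + 1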
+ seq_lens = ((seq_lens + 2 * pad - kernel_size) / stride) + 1
+
+ return seq_lens.floor()
+
+ def _indices_to_subwords(self, input_ids):
+ """
+ Returns the corresponding text string for each input id.
+ """
+ if not hasattr(self.generation_config, "id_to_text"):
+ raise ValueError(
+ """This model generation config doesn't have a `id_to_text` key which maps
+ token ids to subwords. Make sure to load the right generation config."""
+ )
+ batch_size, sequence_len = input_ids.shape
+
+ subwords_batch = []
+ for batch_id in range(batch_size):
+ subwords = []
+ for i in range(sequence_len):
+ subword = self.generation_config.id_to_text.get(str(input_ids[batch_id, i].item()))
+ subwords.append(str(subword))
+ subwords_batch.append(subwords)
+ return subwords_batch
+
+ def _count_character_length_in_subword(
+ self,
+ input_ids,
+ subwords_batch,
+ merge_space_with_prev_subword=False,
+ pad_token_id=0,
+ unk_token_id=1,
+ space="▁",
+ ):
+ """
+ Counts the number of characters per text string associated with the input token id.
+
+ Args:
+ input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+ subwords_batch (`List[List[str]]` of shape `(batch_size, sequence_length)`):
+ Corresponding text string for each input id.
+ merge_space_with_prev_subword (`bool`, *optional*, defaults to `False`):
+ Indicates if the space character is merged with the previous subword. If `False`, it will be merged
+ with the next subword.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ The id of the _padding_ text token. If it is encountered when calculating the length of a subword
+ sample, the lengths of subsequent subwords will be set to 0.
+ unk_token_id (`int`, *optional*, defaults to 1):
+ The id of the _unknown_ text token. Associated with a subword of length 1.
+ space (`str`, *optional*, defaults to `"▁"`):
+ The space character.
+ """
+ batch_size, _ = input_ids.shape
+
+ char_count_per_id = input_ids.new_zeros(input_ids.size())
+
+ subword_lens = input_ids.ne(pad_token_id).sum(1)
+
+ for batch_id in range(batch_size):
+ # We slice out the tensor till the padding index.
+ subword_indices = input_ids[batch_id, : subword_lens[batch_id]]
+ subwords = subwords_batch[batch_id][: subword_lens[batch_id]]
+
+ is_next_start_with_space = [
+ len(subwords[i + 1]) > 1 and subwords[i + 1][0] == space if i < len(subwords) - 1 else False
+ for i in range(len(subwords))
+ ]
+ is_punc = [
+ len(subwords[i]) == 1
+ and not subwords[i].isalpha()
+ and not subwords[i].isnumeric()
+ and subwords[i] != space
+ for i in range(len(subwords))
+ ]
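+ # e.g. with the default merging, the subword "▁the" counts 4 characters (the leading space
+ # plus "t", "h", "e")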
+ for i, (subword_idx, subword) in enumerate(zip(subword_indices, subwords)):
+ if subword_idx == pad_token_id:
+ break
+
+ if subword_idx == unk_token_id:
+ # We set char_len to 1 for an unk token.
+ char_len = 1
+
+ if merge_space_with_prev_subword and is_next_start_with_space[i]:
+ char_len += 1
+ else:
+ # By default, spaces are merged with the next subword.
+ # char_len includes the space.
+ char_len = len(subword)
+
+ if merge_space_with_prev_subword:
+ # Add the space for the next subword.
+ if is_next_start_with_space[i]:
+ char_len += 1
+ # Subtract the space for the current subword.
+ if i > 0 and is_next_start_with_space[i - 1]:
+ char_len -= 1
+ else:
+ # Merge space with punctuation mark by default.
+ if is_punc[i] and is_next_start_with_space[i]:
+ char_len += 1
+ # Subtract the space for the subword succeeding the punctuation mark.
+ elif i > 0 and is_punc[i - 1] and is_next_start_with_space[i - 1]:
+ char_len -= 1
+
+ char_count_per_id[batch_id, i] = char_len
+
+ return char_count_per_id
+
+ def _get_char_input_ids(self, input_ids, subwords_batch, char_count_per_id, pad_token_id=0, unk_token_id=1):
+ """
+ Returns the corresponding character input id for each character of `subwords_batch`.
+
+ Args:
+ input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+ subwords_batch (`List[List[str]]` of shape `(batch_size, sequence_length)`):
+ Corresponding text string for each input id.
+ char_count_per_id (`torch.Tensor` of shape `(batch_size, sequence_length)`):
+ Number of characters per input id.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ The id of the _padding_ text token. If it is encountered when calculating the length of a subword
+ sample, the lengths of subsequent subwords will be set to 0.
+ unk_token_id (`int`, *optional*, defaults to 1):
+ The id of the _unknown_ text token. Associated with a subword of length 1.
+ Returns:
+ `torch.Tensor`: Tensor of shape `(batch_size, char_sequence_length)` containing the id of each character.
+ """
+ if not hasattr(self.generation_config, "char_to_id"):
+ raise ValueError(
+ """This model generation config doesn't have a `char_to_id` key which maps
+ characters to character ids. Make sure to load the right generation config."""
+ )
+
+ batch_size = input_ids.shape[0]
+ max_len = int(char_count_per_id.sum(1).max().item())
+
+ char_seqs = input_ids.new_zeros((batch_size, max_len)).fill_(pad_token_id)
+
+ subword_lens = input_ids.ne(pad_token_id).sum(1)
+
+ for batch_id in range(batch_size):
+ total = 0
+ subword_indices = input_ids[batch_id, : subword_lens[batch_id]]
+ subwords = subwords_batch[batch_id][: subword_lens[batch_id]]
+ for subword_idx, subword in zip(subword_indices, subwords):
+ if subword_idx == unk_token_id:
+ char_ids = [unk_token_id]
+ else:
+ # Get char token indices corresponding to the subwords.
+ char_ids = [self.generation_config.char_to_id.get(ch, unk_token_id) for ch in list(subword)]
+ char_seq_len = len(char_ids)
+ char_seqs[batch_id, total : total + char_seq_len] = torch.tensor(char_ids).to(char_seqs)
+ total += char_seq_len
+ return char_seqs
+
+ def _hard_upsample(self, hidden_states, durations):
+ """
+ Repeats the time dimension of each sample in the batch based on the corresponding duration.
+
+ Args:
+ hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, *)`, *optional*):
+ The sequence to repeat, where `*` is any number of sequence-specific dimensions including none.
+ durations (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indicates how many times to repeat time segments.
+ """
+ if hidden_states.size(0) == 1:
+ hidden_states = torch.repeat_interleave(hidden_states, durations.view(-1), dim=1)
+ else:
+ # if batched sample, need to interleave per sample, and pad -> loss of parallelism
+ if hidden_states.shape[0] > 1 and self.training:
+ logger.warning_once(
+ """`self.training=True` and the input is batched: parallelism is lost during the hifigan
+ forward pass because each sample is upsampled separately."""
+ )
+ hidden_states = [
+ torch.repeat_interleave(hidden_state, duration, dim=0)
+ for (hidden_state, duration) in zip(hidden_states, durations)
+ ]
+
+ hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True)
+
+ return hidden_states
+
+
+@add_start_docstrings(
+ """Transformer speech encoder consisting of *config.speech_encoder_layers* conformer self attention layers.
+ Each layer is a [`SeamlessM4Tv2ConformerEncoderLayer`].""",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+)
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TSpeechEncoder with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2SpeechEncoder(SeamlessM4Tv2PreTrainedModel):
+ main_input_name = "input_features"
+
+ def __init__(self, config: SeamlessM4Tv2Config):
+ super().__init__(config)
+
+ self.feature_projection = SeamlessM4Tv2ConformerFeatureProjection(config)
+ self.encoder = SeamlessM4Tv2ConformerEncoder(config)
+ self.intermediate_ffn = SeamlessM4Tv2ConformerFeedForward(config, act_fn="relu", dropout=0.0)
+ self.adapter = SeamlessM4Tv2ConformerAdapter(config) if config.add_adapter else None
+ self.inner_layer_norm = nn.LayerNorm(config.hidden_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_features: Optional[torch.Tensor],
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_features is None:
+ raise ValueError(
+ """Both `input_features` and `inputs_embeds` are `None` in `SeamlessM4Tv2SpeechEncoder.forward`.
+ Make sure one of them is not `None`."""
+ )
+
+ hidden_states = self.feature_projection(input_features)
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = encoder_outputs[0]
+
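+ # intermediate feed-forward with a half-step (0.5) residual connection, following the
+ # Conformer-style "macaron" feed-forward pattern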
+ expanded_hidden_states = self.intermediate_ffn(hidden_states)
+ hidden_states = hidden_states + 0.5 * expanded_hidden_states
+
+ if self.adapter is not None:
+ hidden_states = self.adapter(hidden_states, attention_mask=attention_mask)
+
+ hidden_states = self.inner_layer_norm(hidden_states)
+
+ if not return_dict:
+ return (hidden_states,) + encoder_outputs[1:]
+
+ return Wav2Vec2BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+# inspired from MBart and NllbMoe
+@add_start_docstrings(
+ "Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`SeamlessM4Tv2EncoderLayer`].",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+ """
+ embed_tokens (`nn.Embedding`, *optional*):
+ Input embedding
+ is_t2u_encoder (`bool`, *optional*, defaults to `False`):
+ Indicates whether it belongs to the text-to-units model, in which case it won't have input embeddings.
+ """,
+)
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TEncoder with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2Encoder(SeamlessM4Tv2PreTrainedModel):
+ def __init__(
+ self,
+ config: SeamlessM4Tv2Config,
+ embed_tokens: Optional[nn.Embedding] = None,
+ is_t2u_encoder: bool = False,
+ ):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ embed_dim = config.hidden_size
+
+ self.is_t2u_encoder = is_t2u_encoder
+ self.max_source_positions = config.max_position_embeddings
+
+ if not self.is_t2u_encoder:
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
+
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+
+ self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding(
+ self.max_source_positions,
+ embed_dim,
+ self.padding_idx,
+ )
+
+ layers = []
+ for _ in range(config.encoder_layers):
+ layers.append(
+ SeamlessM4Tv2EncoderLayer(
+ config,
+ encoder_attention_heads=config.encoder_attention_heads,
+ encoder_ffn_dim=config.encoder_ffn_dim,
+ )
+ )
+
+ self.layers = nn.ModuleList(layers)
+
+ self.layer_norm = nn.LayerNorm(config.hidden_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and self.is_t2u_encoder:
+ raise ValueError(
+ "You cannot pass input_ids to the encoder of the text_to_units model. Pass inputs_embeds instead."
+ )
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_shape = input.shape
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
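+ # slicing the last hidden dimension yields a (batch_size, seq_len) placeholder; it is only used
+ # by `embed_positions` below (its values are only compared against `padding_idx`)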
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ if not self.is_t2u_encoder:
+ embed_pos = self.embed_positions(input)
+
+ hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device)
+ else:
+ hidden_states = inputs_embeds
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.forward,
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+@add_start_docstrings(
+ "Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4Tv2DecoderLayer`].",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+ """
+ embed_tokens (`nn.Embedding`, *optional*):
+ Input embedding
+ """,
+)
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TDecoder with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2Decoder(SeamlessM4Tv2PreTrainedModel):
+ def __init__(
+ self,
+ config: SeamlessM4Tv2Config,
+ embed_tokens: Optional[nn.Embedding] = None,
+ ):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+ self.max_target_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
+
+ if embed_tokens is not None:
+ # if embed_tokens defined, use its shape instead
+ self.embed_tokens = nn.Embedding(embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx)
+ self.embed_tokens.weight = embed_tokens.weight
+ else:
+ self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx)
+
+ self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding(
+ self.max_target_positions,
+ config.hidden_size,
+ padding_idx=self.padding_idx,
+ )
+
+ layers = []
+ for _ in range(config.decoder_layers):
+ layers.append(
+ SeamlessM4Tv2DecoderLayer(
+ config,
+ decoder_attention_heads=config.decoder_attention_heads,
+ decoder_ffn_dim=config.decoder_ffn_dim,
+ )
+ )
+ self.layers = nn.ModuleList(layers)
+ self.layer_norm = nn.LayerNorm(config.hidden_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_shape = input.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input, past_key_values_length=past_key_values_length)
+
+ hidden_states = inputs_embeds + positions.to(inputs_embeds.device)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[2],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[3],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SeamlessM4Tv2DecoderLayer`].",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+ """
+ embed_tokens (`nn.Embedding`, *optional*):
+ Input embedding
+ """,
+)
+class SeamlessM4Tv2TextToUnitDecoder(SeamlessM4Tv2PreTrainedModel):
+ def __init__(
+ self,
+ config: SeamlessM4Tv2Config,
+ embed_tokens: Optional[nn.Embedding] = None,
+ ):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+ self.max_target_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
+
+ if embed_tokens is not None:
+ # if embed_tokens defined, use its shape instead
+ self.embed_tokens = nn.Embedding(embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx)
+ self.embed_tokens.weight = embed_tokens.weight
+ else:
+ self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx)
+
+ self.embed_char = nn.Embedding(config.char_vocab_size, config.hidden_size)
+ self.embed_char_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding(
+ self.max_target_positions,
+ config.hidden_size,
+ padding_idx=self.padding_idx,
+ )
+
+ self.pos_emb_alpha_char = nn.Parameter(torch.ones(1))
+ self.pos_emb_alpha = nn.Parameter(torch.ones(1))
+ self.duration_predictor = SeamlessM4Tv2VariancePredictor(
+ config.variance_predictor_embed_dim,
+ config.variance_predictor_hidden_dim,
+ config.variance_predictor_kernel_size,
+ config.variance_pred_dropout,
+ )
+
+ self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding(
+ self.max_target_positions,
+ config.hidden_size,
+ padding_idx=self.padding_idx,
+ )
+
+ layers = []
+ for _ in range(config.decoder_layers):
+ layers.append(
+ SeamlessM4Tv2TextToUnitDecoderLayer(
+ config,
+ decoder_attention_heads=config.decoder_attention_heads,
+ decoder_ffn_dim=config.decoder_ffn_dim,
+ )
+ )
+ self.layers = nn.ModuleList(layers)
+ self.layer_norm = nn.LayerNorm(config.hidden_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ char_input_ids: torch.LongTensor = None,
+ char_count_per_id: torch.LongTensor = None,
+ encoder_hidden_states: torch.FloatTensor = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SeamlessM4Tv2TextToUnitDecoderOutput]:
+ r"""
+ Args:
+ char_input_ids (`torch.LongTensor` of shape `(batch_size, char_sequence_length)`):
+ Character indices. The correspondence between characters and indices can be found in `char_to_id`, a
+ dictionary in the generation configuration.
+ char_count_per_id (`torch.Tensor` of shape `(batch_size, encoder_sequence_length)`):
+ Number of characters per text input id.
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # create padding mask for character lengths
+ char_padding_mask = _compute_new_attention_mask(char_input_ids, char_count_per_id.sum(1))
+
+ # upsample hidden states according to characters sequence lengths
+ char_hidden_states = self._hard_upsample(encoder_hidden_states, char_count_per_id)
+ # embed char positions
+ char_positions = self.pos_emb_alpha_char * self.embed_char_positions(inputs_embeds=char_hidden_states)
+ # update char hidden states with positions and char embeddings
+ char_hidden_states = self.embed_char(char_input_ids) * self.embed_scale + char_positions + char_hidden_states
+
+ # predict duration
+ log_dur_pred = self.duration_predictor(char_hidden_states, padding_mask=char_padding_mask)
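+ # the predictor works in log(1 + duration) space, so invert with exp(.) - 1, round to integer
+ # frame counts, and keep at least one frame per character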
+ dur_out = torch.clamp(torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1)
+ dur_out = dur_out.masked_fill(~char_padding_mask.bool(), 0.0)
+
+ # upsample char hidden states according to predicted duration
+ char_hidden_states = self._hard_upsample(char_hidden_states, dur_out)
+
+ positions = self.pos_emb_alpha * self.embed_positions(inputs_embeds=char_hidden_states)
+ hidden_states = char_hidden_states + positions
+
+ padding_mask = _compute_new_attention_mask(hidden_states, dur_out.sum(1))
+ attention_mask = _prepare_4d_attention_mask(padding_mask, hidden_states.dtype)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ padding_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ padding_mask=padding_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[2],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attns, padding_mask] if v is not None)
+ return SeamlessM4Tv2TextToUnitDecoderOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ padding_mask=padding_mask,
+ )
+
+
+@add_start_docstrings(
+ "Transformer bare text-to-unit encoder-decoder. The encoder is a [`SeamlessM4Tv2Encoder`] without embeddings and the decoder is a [`SeamlessM4Tv2TextToUnitDecoder`].",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+ """
+ embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder.
+ """,
+)
+class SeamlessM4Tv2TextToUnitModel(SeamlessM4Tv2PreTrainedModel):
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitModel.__init__ with SeamlessM4T->SeamlessM4Tv2, Decoder->TextToUnitDecoder
+ def __init__(
+ self,
+ config: SeamlessM4Tv2Config,
+ embed_tokens_decoder: Optional[nn.Embedding] = None,
+ ):
+ super().__init__(config)
+
+ self.encoder = SeamlessM4Tv2Encoder(config, is_t2u_encoder=True)
+ self.decoder = SeamlessM4Tv2TextToUnitDecoder(config, embed_tokens_decoder)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ char_input_ids: torch.LongTensor = None,
+ char_count_per_id: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # decoder outputs consists of (dec_features, dec_hidden, dec_attn, padding_mask)
+ decoder_outputs = self.decoder(
+ char_input_ids=char_input_ids,
+ char_count_per_id=char_count_per_id,
+ encoder_hidden_states=encoder_outputs[0],
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return SeamlessM4Tv2TextToUnitOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ padding_mask=decoder_outputs.padding_mask,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "Transformer text-to-unit encoder-decoder with a language model head. The base encoder-decoder model is a [`SeamlessM4Tv2TextToUnitModel`].",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+ """
+ embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder.
+ """,
+)
+class SeamlessM4Tv2TextToUnitForConditionalGeneration(SeamlessM4Tv2PreTrainedModel):
+ _keys_to_ignore_on_load_missing = [
+ "vocoder",
+ "speech_encoder",
+ "text_encoder",
+ "text_decoder",
+ ]
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "lm_head.weight"]
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.__init__ with SeamlessM4T->SeamlessM4Tv2
+ def __init__(
+ self,
+ config: SeamlessM4Tv2Config,
+ embed_tokens_decoder: Optional[nn.Embedding] = None,
+ ):
+ # update config - used principally for bos_token_id etc.
+ config = copy.deepcopy(config)
+ for param, val in config.to_dict().items():
+ if param.startswith("t2u_"):
+ config.__setattr__(param[4:], val)
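+ # e.g. `config.t2u_vocab_size` overrides `config.vocab_size` for this text-to-unit sub-model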
+ super().__init__(config)
+
+ self.model = SeamlessM4Tv2TextToUnitModel(config, embed_tokens_decoder)
+
+ self.lm_head = nn.Linear(config.hidden_size, config.t2u_vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.get_encoder
+ def get_encoder(self):
+ return self.model.encoder
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.get_decoder
+ def get_decoder(self):
+ return self.model.decoder
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.set_input_embeddings
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(M4T_TEXT_TO_UNITS_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ char_input_ids: torch.LongTensor = None,
+ char_count_per_id: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.model(
+ input_ids,
+ char_input_ids=char_input_ids,
+ char_count_per_id=char_count_per_id,
+ attention_mask=attention_mask,
+ encoder_outputs=encoder_outputs,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return SeamlessM4Tv2TextToUnitOutput(
+ last_hidden_state=lm_logits,
+ padding_mask=outputs.padding_mask,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ loss=masked_lm_loss,
+ )
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration._tie_weights
+ def _tie_weights(self) -> None:
+ if getattr(self.config, "tie_word_embeddings", True):
+ output_embeddings = self.get_output_embeddings()
+ if output_embeddings is not None:
+ self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
+
+
+############ VOCODER related code ################
+
+
+HIFIGAN_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`SeamlessM4Tv2Config`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+# Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock
+class HifiGanResidualBlock(nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
+ super().__init__()
+ self.leaky_relu_slope = leaky_relu_slope
+
+ self.convs1 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ stride=1,
+ dilation=dilation[i],
+ padding=self.get_padding(kernel_size, dilation[i]),
+ )
+ for i in range(len(dilation))
+ ]
+ )
+ self.convs2 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ stride=1,
+ dilation=1,
+ padding=self.get_padding(kernel_size, 1),
+ )
+ for _ in range(len(dilation))
+ ]
+ )
+
+ def get_padding(self, kernel_size, dilation=1):
+ return (kernel_size * dilation - dilation) // 2
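+ # "same"-style padding for a stride-1 dilated conv: e.g. kernel_size=3, dilation=5 gives
+ # (3 * 5 - 5) // 2 = 5, which keeps the sequence length unchanged.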
+
+ def apply_weight_norm(self):
+ for layer in self.convs1:
+ nn.utils.weight_norm(layer)
+ for layer in self.convs2:
+ nn.utils.weight_norm(layer)
+
+ def remove_weight_norm(self):
+ for layer in self.convs1:
+ nn.utils.remove_weight_norm(layer)
+ for layer in self.convs2:
+ nn.utils.remove_weight_norm(layer)
+
+ def forward(self, hidden_states):
+ for conv1, conv2 in zip(self.convs1, self.convs2):
+ residual = hidden_states
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = conv1(hidden_states)
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = conv2(hidden_states)
+ hidden_states = hidden_states + residual
+ return hidden_states
+
+
+class SeamlessM4Tv2VariancePredictor(nn.Module):
+ def __init__(self, embed_dim, hidden_dim, kernel_size, var_pred_dropout):
+ super().__init__()
+
+ self.conv1 = nn.Conv1d(
+ embed_dim,
+ hidden_dim,
+ kernel_size=kernel_size,
+ padding="same",
+ )
+ self.activation_function = nn.ReLU()
+ self.ln1 = nn.LayerNorm(hidden_dim)
+ self.dropout_module = nn.Dropout(p=var_pred_dropout)
+ self.conv2 = nn.Conv1d(
+ hidden_dim,
+ hidden_dim,
+ kernel_size=kernel_size,
+ padding="same",
+ )
+ self.ln2 = nn.LayerNorm(hidden_dim)
+ self.proj = nn.Linear(hidden_dim, 1)
+
+ def forward(self, hidden_states: Tensor, padding_mask: Tensor = None) -> Tensor:
+ # Input: B x T x C; Output: B x T
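+ # B = batch size, T = sequence length, C = embed_dim. Both convolutions use `padding="same"`, so T is
+ # preserved; the final projection maps every position to a single scalar (used as a log-duration
+ # prediction by the vocoder).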
+ if padding_mask is not None:
+ hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
+ hidden_states = self.conv1(hidden_states.transpose(1, 2))
+ hidden_states = self.activation_function(hidden_states).transpose(1, 2)
+ hidden_states = self.dropout_module(self.ln1(hidden_states))
+ if padding_mask is not None:
+ hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
+ hidden_states = self.conv2(hidden_states.transpose(1, 2))
+ hidden_states = self.activation_function(hidden_states).transpose(1, 2)
+ hidden_states = self.dropout_module(self.ln2(hidden_states))
+ return self.proj(hidden_states).squeeze(dim=2)
+
+
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4THifiGan with SeamlessM4T->SeamlessM4Tv2
+class SeamlessM4Tv2HifiGan(nn.Module):
+ def __init__(self, config: SeamlessM4Tv2Config):
+ super().__init__()
+ model_in_dim = config.unit_embed_dim + config.lang_embed_dim + config.spkr_embed_dim
+ self.leaky_relu_slope = config.leaky_relu_slope
+ self.num_kernels = len(config.resblock_kernel_sizes)
+ self.num_upsamples = len(config.upsample_rates)
+ self.conv_pre = nn.Conv1d(
+ model_in_dim,
+ config.upsample_initial_channel,
+ kernel_size=7,
+ stride=1,
+ padding=3,
+ )
+
+ self.upsampler = nn.ModuleList()
+ for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
+ self.upsampler.append(
+ nn.ConvTranspose1d(
+ config.upsample_initial_channel // (2**i),
+ config.upsample_initial_channel // (2 ** (i + 1)),
+ kernel_size=kernel_size,
+ stride=upsample_rate,
+ padding=(kernel_size - upsample_rate) // 2,
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.upsampler)):
+ channels = config.upsample_initial_channel // (2 ** (i + 1))
+ for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
+ self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
+
+ self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
+
+ def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor:
+ r"""
+ Converts a tensor of unit, speaker and language embeddings into a speech waveform. Passing a batch of
+ embeddings returns a batch of speech waveforms.
+
+ Args:
+ input_embeds (`torch.FloatTensor` of shape `(batch_size, model_in_dim, sequence_length)`):
+ Tensor containing the unit, speaker and language embeddings concatenated along the channel
+ dimension. Note that `model_in_dim` is the sum of `config.unit_embed_dim`, `config.lang_embed_dim`
+ and `config.spkr_embed_dim`.
+
+ Returns:
+ `torch.FloatTensor`: Tensor containing the speech waveform, of shape `(batch_size, num_frames)`.
+ """
+
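+ # each upsampling stage multiplies the sequence length by its `upsample_rate`, so the output waveform
+ # has roughly `sequence_length * prod(config.upsample_rates)` samples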
+ hidden_states = self.conv_pre(input_embeds)
+ for i in range(self.num_upsamples):
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = self.upsampler[i](hidden_states)
+
+ res_state = self.resblocks[i * self.num_kernels](hidden_states)
+ for j in range(1, self.num_kernels):
+ res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
+ hidden_states = res_state / self.num_kernels
+
+ hidden_states = nn.functional.leaky_relu(hidden_states)
+ hidden_states = self.conv_post(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+
+ # remove the channel dim, which conv_post collapses to 1
+ waveform = hidden_states.squeeze(1)
+
+ return waveform
+
+
+@add_start_docstrings(
+ """Code HiFi-GAN vocoder as described in this [repository](https://github.com/facebookresearch/speech-resynthesis).""",
+ HIFIGAN_START_DOCSTRING,
+)
+class SeamlessM4Tv2CodeHifiGan(PreTrainedModel):
+ config_class = SeamlessM4Tv2Config
+ main_input_name = "input_embeds"
+ _no_split_modules = []
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.pad_token_id = config.t2u_pad_token_id
+ embed_dim = config.unit_embed_dim
+ kernel_size = config.variance_predictor_kernel_size
+ var_pred_dropout = config.var_pred_dropout
+ self.dur_predictor = SeamlessM4Tv2VariancePredictor(embed_dim, embed_dim, kernel_size, var_pred_dropout)
+
+ self.unit_embedding = nn.Embedding(config.unit_hifi_gan_vocab_size, config.unit_embed_dim)
+ self.speaker_embedding = nn.Embedding(config.vocoder_num_spkrs, config.spkr_embed_dim)
+ self.language_embedding = nn.Embedding(config.vocoder_num_langs, config.lang_embed_dim)
+
+ self.hifi_gan = SeamlessM4Tv2HifiGan(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan._get_dur_output_lengths
+ def _get_dur_output_lengths(self, input_ids, dur_out):
+ """
+ Computes the output length after the duration layer.
+ """
+ unit_lengths = (input_ids != self.pad_token_id).sum(1)
+
+ # take care of edge cases where there is no padding or too much padding
+ unit_lengths = torch.clamp(unit_lengths, 0, dur_out.shape[1] - 1)
+
+ cumulative_dur_out = torch.cumsum(dur_out, dim=1)
+ unit_lengths = cumulative_dur_out.gather(dim=1, index=unit_lengths.unsqueeze(1)).squeeze()
+
+ return unit_lengths
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan._get_output_hifigan_lengths
+ def _get_output_hifigan_lengths(self, input_lengths: Union[torch.LongTensor, int]):
+ """
+ Computes the output length of the hifigan convolutional layers
+ """
+
+ def _conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
+ # 1D convolutional layer output length formula taken
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+ return (
+ torch.div(input_length + 2 * pad - dilation * (kernel_size - 1) - 1, stride, rounding_mode="floor") + 1
+ )
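+ # e.g. `conv_pre` and `conv_post` use kernel_size=7, stride=1, pad=3, for which this formula gives an
+ # output length equal to the input length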
+
+ def _transpose_conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
+ return (input_length - 1) * stride - 2 * pad + dilation * (kernel_size - 1) + 1
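+ # with `pad = (kernel_size - stride) // 2`, as used for the upsampler below, this simplifies to
+ # `input_length * stride` whenever `kernel_size - stride` is even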
+
+ # conv_pre
+ input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
+
+ # upsampler
+ for i, (upsample_rate, kernel_size) in enumerate(
+ zip(self.config.upsample_rates, self.config.upsample_kernel_sizes)
+ ):
+ input_lengths = _transpose_conv_out_length(
+ input_lengths, kernel_size, upsample_rate, (kernel_size - upsample_rate) // 2
+ )
+
+ # resblock
+ for i in range(len(self.config.upsample_rates)):
+ for kernel_size, dilation in zip(self.config.resblock_kernel_sizes, self.config.resblock_dilation_sizes):
+ for dil in dilation:
+ input_lengths = _conv_out_length(
+ input_lengths, kernel_size, 1, (kernel_size - 1) * dil // 2, dilation=dil
+ )
+
+ for dil in dilation:
+ input_lengths = _conv_out_length(input_lengths, kernel_size, 1, (kernel_size - 1) // 2, dilation=1)
+
+ # conv_post
+ input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
+
+ return input_lengths
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan.forward with SeamlessM4T->SeamlessM4Tv2, spkr_id->speaker_id
+ def forward(
+ self, input_ids: torch.LongTensor, speaker_id: torch.Tensor, lang_id: torch.Tensor
+ ) -> Tuple[torch.Tensor]:
+ """
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4Tv2TextToUnitForConditionalGeneration`]. [What are input
+ IDs?](../glossary#input-ids)
+ speaker_id (`torch.Tensor` of shape `(batch_size, 1)`):
+ The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
+ lang_id (`torch.Tensor` of shape `(batch_size, 1)`):
+ The id of the target language used by the vocoder. Must be lower than `config.vocoder_num_langs`.
+ """
+ hidden_states = self.unit_embedding(input_ids).transpose(1, 2)
+ spkr = self.speaker_embedding(speaker_id).transpose(1, 2)
+ lang = self.language_embedding(lang_id).transpose(1, 2)
+
+ log_dur_pred = self.dur_predictor(hidden_states.transpose(1, 2))
+ dur_out = torch.clamp(torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1)
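+ # e.g. a predicted log-duration of 0.0 gives round(exp(0.0) - 1) = 0, which the clamp raises to the
+ # minimum of 1 frame per unit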
+ # B x C x T
+ if hidden_states.size(0) == 1:
+ hidden_states = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2)
+ else:
+ # if batched sample, need to interleave per sample, and pad -> loss of parallelism
+ if hidden_states.shape[0] > 1 and self.training:
+ logger.warning(
+ """`self.training=True` and you use batching. You lose parallelism during the hifigan
+ forward pass because the samples are interleaved."""
+ )
+ hidden_states = [
+ torch.repeat_interleave(hidden_state, duration, dim=-1).transpose(0, 1)
+ for (hidden_state, duration) in zip(hidden_states, dur_out)
+ ]
+
+ hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True).transpose(1, 2)
+
+ spkr = spkr.repeat(1, 1, hidden_states.shape[-1])
+ lang = lang.repeat(1, 1, hidden_states.shape[-1])
+ hidden_states = torch.cat([lang, hidden_states, spkr], dim=1)
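+ # channel-wise concatenation: language, unit and speaker embeddings together provide the
+ # `model_in_dim` channels expected by `SeamlessM4Tv2HifiGan`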
+
+ hidden_states = self.hifi_gan(hidden_states)
+
+ unit_lengths = self._get_dur_output_lengths(input_ids, dur_out)
+ lengths = self._get_output_hifigan_lengths(unit_lengths)
+
+ return hidden_states, lengths
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan._init_weights
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan.apply_weight_norm
+ def apply_weight_norm(self):
+ nn.utils.weight_norm(self.hifi_gan.conv_pre)
+ for layer in self.hifi_gan.upsampler:
+ nn.utils.weight_norm(layer)
+ for layer in self.hifi_gan.resblocks:
+ layer.apply_weight_norm()
+ nn.utils.weight_norm(self.hifi_gan.conv_post)
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan.remove_weight_norm
+ def remove_weight_norm(self):
+ nn.utils.remove_weight_norm(self.hifi_gan.conv_pre)
+ for layer in self.hifi_gan.upsampler:
+ nn.utils.remove_weight_norm(layer)
+ for layer in self.hifi_gan.resblocks:
+ layer.remove_weight_norm()
+ nn.utils.remove_weight_norm(self.hifi_gan.conv_post)
+
+
+############ WHOLE MODEL related code ################
+
+
+@add_start_docstrings(
+ "The text-to-text SeamlessM4Tv2 Model transformer which can be used for T2TT.",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+)
+# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToText with SeamlessM4T->SeamlessM4Tv2,SeamlessM4Tv2Tokenizer->SeamlessM4TTokenizer, SeamlessM4Tv2Processor->SeamlessM4TProcessor
+class SeamlessM4Tv2ForTextToText(SeamlessM4Tv2PreTrainedModel):
+ _keys_to_ignore_on_load_missing = ["speech_encoder", "t2u_model", "vocoder"]
+ main_input_name = "input_ids"
+
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_encoder.embed_tokens.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config: SeamlessM4Tv2Config):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+
+ self.text_encoder = SeamlessM4Tv2Encoder(config, self.shared)
+ self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.text_encoder
+
+ def get_decoder(self):
+ return self.text_decoder
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.text_encoder.embed_tokens = value
+ self.text_decoder.embed_tokens = value
+ self.shared = value
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def generate(
+ self,
+ input_ids=None,
+ tgt_lang=None,
+ generation_config=None,
+ logits_processor=None,
+ stopping_criteria=None,
+ prefix_allowed_tokens_fn=None,
+ synced_gpus=False,
+ **kwargs,
+ ):
+ """
+ Generates sequences of token ids.
+
+
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+
+
+ Parameters:
+ input_ids (`torch.Tensor` of varying shape depending on the modality, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logits processor is passed that is already created with the arguments or a
+ generation config, an error is thrown. This feature is intended for advanced users.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
+ generation config. If a stopping criterion is passed that is already created with the arguments or a
+ generation config, an error is thrown. This feature is intended for advanced users.
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
+ If provided, this function constrains the beam search to allowed tokens only at each step. If not
+ provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
+ `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
+ on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
+ for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
+ Retrieval](https://arxiv.org/abs/2010.00904).
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model.
+
+ Return:
+ [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
+ or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`. The possible
+ [`~utils.ModelOutput`] types are:
+ - [`~generation.GenerateEncoderDecoderOutput`],
+ - [`~generation.GenerateBeamEncoderDecoderOutput`]
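+
+ Example (an illustrative sketch rather than a guaranteed doctest; the checkpoint name
+ `"facebook/seamless-m4t-v2-large"` is an assumption and should be adapted to the weights you actually use):
+
+ ```python
+ >>> from transformers import AutoProcessor, SeamlessM4Tv2ForTextToText
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
+ >>> model = SeamlessM4Tv2ForTextToText.from_pretrained("facebook/seamless-m4t-v2-large")
+
+ >>> # tokenize an English sentence and translate it to French
+ >>> inputs = processor(text="Hello, my dog is cute.", src_lang="eng", return_tensors="pt")
+ >>> output_tokens = model.generate(**inputs, tgt_lang="fra")
+ >>> translated_text = processor.decode(output_tokens[0], skip_special_tokens=True)
+ ```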
+ """
+ # prepare text_decoder_input_ids
+ text_decoder_input_ids = kwargs.pop("decoder_input_ids", None)
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ if tgt_lang is not None:
+ batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds"))
+
+ if hasattr(self.generation_config, "text_decoder_lang_to_code_id"):
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
+ raise ValueError(
+ f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in
+ {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}"""
+ )
+ # tgt_lang gets priority over decoder input ids
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+ else:
+ raise ValueError(
+ """This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps
+ the target language to the right token id. Make sure to load the right generation config."""
+ )
+ else:
+ # only a warning, otherwise errors appear in the tests
+ logger.warning(
+ """You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get
+ a correct generation, otherwise the generation will probably make no sense."""
+ )
+
+ return super().generate(
+ input_ids,
+ generation_config,
+ logits_processor,
+ stopping_criteria,
+ prefix_allowed_tokens_fn,
+ synced_gpus,
+ decoder_input_ids=text_decoder_input_ids,
+ **kwargs,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ "The speech-to-text SeamlessM4Tv2 Model transformer which can be used for S2TT.",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+)
+class SeamlessM4Tv2ForSpeechToText(SeamlessM4Tv2PreTrainedModel):
+ _keys_to_ignore_on_load_missing = ["text_decoder", "t2u_model", "vocoder"]
+ main_input_name = "input_features"
+
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.__init__ with SeamlessM4T->SeamlessM4Tv2
+ def __init__(self, config: SeamlessM4Tv2Config):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+ self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config)
+ self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_encoder
+ def get_encoder(self):
+ return self.speech_encoder
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_decoder
+ def get_decoder(self):
+ return self.text_decoder
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.set_input_embeddings
+ def set_input_embeddings(self, value):
+ self.text_decoder.embed_tokens = value
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText._tie_weights
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_SPEECH_INPUTS_DOCSTRING)
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.forward
+ def forward(
+ self,
+ input_features: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.speech_encoder(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_outputs[0].device
+ )
+ encoder_attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.generate
+ def generate(
+ self,
+ input_features=None,
+ tgt_lang=None,
+ generation_config=None,
+ logits_processor=None,
+ stopping_criteria=None,
+ prefix_allowed_tokens_fn=None,
+ synced_gpus=False,
+ **kwargs,
+ ):
+ """
+ Generates sequences of token ids.
+
+
+
+ Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
+ model's default generation configuration. You can override any `generation_config` by passing the corresponding
+ parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+
+
+ Parameters:
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
+ Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+ `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ logits_processor (`LogitsProcessorList`, *optional*):
+ Custom logits processors that complement the default logits processors built from arguments and
+ generation config. If a logits processor is passed that is already created with the arguments or a
+ generation config, an error is thrown. This feature is intended for advanced users.
+ stopping_criteria (`StoppingCriteriaList`, *optional*):
+ Custom stopping criteria that complement the default stopping criteria built from arguments and a
+ generation config. If a stopping criterion is passed that is already created with the arguments or a
+ generation config, an error is thrown. This feature is intended for advanced users.
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
+ If provided, this function constrains the beam search to allowed tokens only at each step. If not
+ provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
+ `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
+ on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
+ for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
+ Retrieval](https://arxiv.org/abs/2010.00904).
+ synced_gpus (`bool`, *optional*, defaults to `False`):
+ Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
+ kwargs (`Dict[str, Any]`, *optional*):
+ Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
+ forwarded to the `forward` function of the model.
+
+ Return:
+ [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
+ or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`. The possible
+ [`~utils.ModelOutput`] types are:
+ - [`~generation.GenerateEncoderDecoderOutput`],
+ - [`~generation.GenerateBeamEncoderDecoderOutput`]
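+
+ Example (an illustrative sketch; the checkpoint name `"facebook/seamless-m4t-v2-large"` and the dummy
+ 16kHz audio are assumptions, replace them with a real checkpoint and real speech):
+
+ ```python
+ >>> import numpy as np
+ >>> from transformers import AutoProcessor, SeamlessM4Tv2ForSpeechToText
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
+ >>> model = SeamlessM4Tv2ForSpeechToText.from_pretrained("facebook/seamless-m4t-v2-large")
+
+ >>> # one second of dummy 16kHz audio, just to show the expected shapes
+ >>> audio = np.random.randn(16000).astype(np.float32)
+ >>> inputs = processor(audios=audio, sampling_rate=16000, return_tensors="pt")
+ >>> output_tokens = model.generate(**inputs, tgt_lang="eng")
+ >>> transcribed_text = processor.decode(output_tokens[0], skip_special_tokens=True)
+ ```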
+ """
+ text_decoder_input_ids = kwargs.pop("decoder_input_ids", None)
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ if tgt_lang is not None:
+ inputs = kwargs.get("inputs_embeds") if input_features is None else input_features
+ inputs = (
+ inputs
+ if inputs is not None
+ else kwargs.get("encoder_outputs", {"last_hidden_state": None})["last_hidden_state"]
+ )
+ batch_size = len(inputs)
+
+ if hasattr(self.generation_config, "text_decoder_lang_to_code_id"):
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
+ raise ValueError(
+ f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in
+ {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}"""
+ )
+ # tgt_lang gets priority over decoder input ids
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+ else:
+ raise ValueError(
+ """This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps
+ the target language to the right token id. Make sure to load the right generation config."""
+ )
+ else:
+ # only a warning, otherwise errors appear in the tests
+ logger.warning(
+ """You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get
+ a correct generation, otherwise the generation will probably make no sense."""
+ )
+ return super().generate(
+ input_features,
+ generation_config,
+ logits_processor,
+ stopping_criteria,
+ prefix_allowed_tokens_fn,
+ synced_gpus,
+ decoder_input_ids=text_decoder_input_ids,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ "The text-to-speech SeamlessM4Tv2 Model transformer which can be used for T2ST.",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+)
+class SeamlessM4Tv2ForTextToSpeech(SeamlessM4Tv2PreTrainedModel):
+ _keys_to_ignore_on_load_missing = ["speech_encoder"]
+ main_input_name = "input_ids"
+
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_encoder.embed_tokens.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.__init__ with SeamlessM4T->SeamlessM4Tv2
+ def __init__(self, config: SeamlessM4Tv2Config):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+
+ self.text_encoder = SeamlessM4Tv2Encoder(config, self.shared)
+ self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config)
+ self.vocoder = SeamlessM4Tv2CodeHifiGan(config)
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_encoder
+ def get_encoder(self):
+ return self.text_encoder
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_decoder
+ def get_decoder(self):
+ return self.text_decoder
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.set_input_embeddings
+ def set_input_embeddings(self, value):
+ self.text_encoder.embed_tokens = value
+ self.text_decoder.embed_tokens = value
+ self.shared = value
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech._tie_weights
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING)
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.forward with SeamlessM4T->SeamlessM4Tv2
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
+ logger.warning(
+ "This is the same forward method as `SeamlessM4Tv2ForTextToText`."
+ "It doesn't use the text-to-unit model `SeamlessM4Tv2TextToUnitForConditionalGeneration`."
+ "If you want to generate speech, use the `.generate` method."
+ )
+ encoder_outputs = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ return_intermediate_token_ids: Optional[bool] = None,
+ tgt_lang: Optional[str] = None,
+ speaker_id: Optional[int] = 0,
+ **kwargs,
+ ) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]:
+ """
+ Generates translated audio waveforms.
+
+
+
+ This method first calls `.generate` on the text model, then runs the non-autoregressive text-to-unit
+ model and the vocoder to produce the waveform. You can specify keyword arguments at two different levels:
+ general arguments that will be passed to both models, or prefixed arguments that will be passed to one of
+ them.
+
+ For example, calling `.generate(input_ids, num_beams=4, speech_do_sample=True)` will successively perform
+ beam-search decoding on the text model, and multinomial sampling on the speech model.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ return_intermediate_token_ids (`bool`, *optional*):
+ If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
+ to get translated text alongside the audio.
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ speaker_id (`int`, *optional*, defaults to 0):
+ The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
+ kwargs (*optional*):
+ Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
+ arguments are of two types:
+
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
+ except for `decoder_input_ids` which will only be passed through the text components.
+ - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
+ text model and speech model respectively. It has the priority over the keywords without a prefix.
+
+ This means you can, for example, specify a generation strategy for one generation but not for the
+ other.
+
+
+ Returns:
+ `Union[SeamlessM4Tv2GenerationOutput, Tuple[Tensor]]`:
+ - If `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
+ - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
+ sequence_length)` and `waveform_lengths` which gives the length of each sample.
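+
+ Example (an illustrative sketch; the checkpoint name `"facebook/seamless-m4t-v2-large"` is an assumption,
+ and the returned waveform is at the vocoder's sampling rate, typically 16kHz for SeamlessM4T checkpoints):
+
+ ```python
+ >>> from transformers import AutoProcessor, SeamlessM4Tv2ForTextToSpeech
+
+ >>> processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
+ >>> model = SeamlessM4Tv2ForTextToSpeech.from_pretrained("facebook/seamless-m4t-v2-large")
+
+ >>> inputs = processor(text="Hello, my dog is cute.", src_lang="eng", return_tensors="pt")
+ >>> # translate to French speech; `speaker_id` selects one of the vocoder's speaker embeddings
+ >>> waveform, waveform_lengths = model.generate(**inputs, tgt_lang="fra", speaker_id=0)
+ ```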
+ """
+ batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds"))
+
+ if tgt_lang is None:
+ raise ValueError("You must specify a `tgt_lang` to generate translated speech.")
+ else:
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]:
+ lang_code_to_id = getattr(self.generation_config, key, None)
+ if lang_code_to_id is None:
+ raise ValueError(
+ f"""This model generation config doesn't have a `{key}` key which maps the target language
+ to the right token id. Make sure to load the right generation config."""
+ )
+ elif tgt_lang not in lang_code_to_id:
+ raise ValueError(
+ f"""`tgt_lang={tgt_lang}` is not supported by this model.
+ Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports
+ more languages for text translation than for speech synthesis."""
+ )
+
+ kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
+ kwargs_text["output_hidden_states"] = True
+ kwargs_text["return_dict_in_generate"] = True
+ kwargs_text["output_scores"] = True
+
+ text_decoder_input_ids = kwargs_text.get("decoder_input_ids")
+
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+
+ kwargs_text["decoder_input_ids"] = text_decoder_input_ids
+
+ # first generation
+ text_generation_output = super().generate(input_ids, **kwargs_text)
+ sequences = text_generation_output.sequences
+
+ # prepare second generation
+ num_return_sequences = len(sequences) // batch_size
+ attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None))
+
+ if attention_mask is not None:
+ # repeat attention mask along the batch dimension
+ attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0)
+ encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
+
+ # repeat encoder hidden states along the batch dimension
+ encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0)
+
+ # get decoder last hidden state - must do a pass through the text decoder
+ t2u_input_embeds = self.text_decoder(
+ input_ids=sequences[:, :-1], # Manually trim the final EOS token
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=attention_mask,
+ ).last_hidden_state
+
+ pad_token_id = self.generation_config.pad_token_id
+
+ # Compute new attention mask
+ seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1)
+ t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
+ kwargs_speech["attention_mask"] = t2u_model_attention_mask
+
+ # REMOVE EOS and lang_id
+ t2u_input_ids = sequences[:, 2:-1]
+ # replace any remaining EOS tokens with the pad token
+ t2u_input_ids = torch.masked_fill(
+ t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id
+ )
+
+ # compute t2u_char_input_ids
+ t2u_subwords = self._indices_to_subwords(t2u_input_ids)
+ t2u_char_count_per_id = self._count_character_length_in_subword(
+ t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id
+ )
+
+ # Add pads for lang, EOS tokens as per NLLB "source" tokenizer mode.
+ pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1))
+ t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1)
+ t2u_char_input_ids = self._get_char_input_ids(
+ t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id
+ )
+
+ # second pass
+ t2u_output = self.t2u_model(
+ inputs_embeds=t2u_input_embeds,
+ char_input_ids=t2u_char_input_ids,
+ char_count_per_id=t2u_char_count_per_id,
+ **kwargs_speech,
+ )
+
+ t2u_logits = t2u_output[0]
+ padding_mask = t2u_output[1].bool()
+
+ # The text-to-unit model is non-autoregressive. We keep the ability to use sampling with temperature.
+ temperature = kwargs_speech.get("temperature", None)
+ if (temperature is None or temperature == 1.0) or not kwargs_speech.get("do_sample", False):
+ unit_ids = t2u_logits.argmax(dim=-1)
+ else:
+ t2u_logits = t2u_logits / temperature
+ # apply softmax
+ probs = nn.functional.softmax(t2u_logits, dim=-1)
+ # reshape to 2D: (batch_size, seq_len, t2u_vocab_size) -> (batch_size*seq_len, t2u_vocab_size)
+ probs = probs.reshape((-1, probs.shape[2]))
+ # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len)
+ unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1)
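+ # sampling is done independently at each position since the text-to-unit decoder is non-autoregressive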
+
+ output_unit_ids = unit_ids.detach().clone()
+
+ replace_mask = (unit_ids == self.config.t2u_eos_token_id) | (~padding_mask)
+ # replace eos with pad
+ unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id)
+
+ # shift unit ids down by the vocoder offset to account for control symbols
+ unit_ids = torch.where(
+ unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset
+ )
+
+ vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
+ vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device)
+
+ speaker_id = torch.tensor([[speaker_id]] * len(unit_ids)).to(self.device)
+
+ waveform, waveform_lengths = self.vocoder(
+ input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id
+ )
+
+ if return_intermediate_token_ids:
+ return SeamlessM4Tv2GenerationOutput(
+ waveform=waveform,
+ waveform_lengths=waveform_lengths,
+ sequences=sequences,
+ unit_sequences=output_unit_ids,
+ )
+
+ return waveform, waveform_lengths
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ "The speech-to-speech SeamlessM4Tv2 Model transformer which can be used for S2ST.",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+)
+class SeamlessM4Tv2ForSpeechToSpeech(SeamlessM4Tv2PreTrainedModel):
+ _keys_to_ignore_on_load_missing = ["text_encoder"]
+ main_input_name = "input_features"
+
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.__init__ with SeamlessM4T->SeamlessM4Tv2
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+ self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config)
+ self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config)
+ self.vocoder = SeamlessM4Tv2CodeHifiGan(config)
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.get_encoder
+ def get_encoder(self):
+ return self.speech_encoder
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.get_decoder
+ def get_decoder(self):
+ return self.text_decoder
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.set_input_embeddings
+ def set_input_embeddings(self, value):
+ self.text_decoder.embed_tokens = value
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech._tie_weights
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_SPEECH_INPUTS_DOCSTRING)
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.forward with SeamlessM4T->SeamlessM4Tv2
+ def forward(
+ self,
+ input_features: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
+ logger.warning(
+ "This is the same forward method as `SeamlessM4Tv2ForSpeechToText`. It doesn't use `self.t2u_model`."
+ "If you want to generate speech, use the `generate` method."
+ )
+
+ encoder_outputs = self.speech_encoder(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_outputs[0].device
+ )
+ encoder_attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_features: Optional[torch.Tensor] = None,
+ return_intermediate_token_ids: Optional[bool] = None,
+ tgt_lang: Optional[str] = None,
+ speaker_id: Optional[int] = 0,
+ **kwargs,
+ ) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]:
+ """
+ Generates translated audio waveforms.
+
+ This method successively calls the `.generate` function of two different sub-models. You can specify keyword
+ arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
+ that will be passed to one of them.
+
+ For example, calling `.generate(input_features, num_beams=4, speech_do_sample=True)` will successively perform
+ beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+ Args:
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
+ Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+ return_intermediate_token_ids (`bool`, *optional*):
+ If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
+ to get translated text alongside the audio.
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ speaker_id (`int`, *optional*, defaults to 0):
+ The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
+
+ kwargs (*optional*):
+ Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
+ arguments are of two types:
+
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
+ except for `decoder_input_ids` which will only be passed through the text components.
+ - With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
+ text model and speech model respectively. These take priority over the keywords without a prefix.
+
+ This means you can, for example, specify a generation strategy for one generation but not for the
+ other.
+
+
+ Returns:
+ `Union[SeamlessM4Tv2GenerationOutput, Tuple[Tensor]]`:
+ - If `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
+ - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
+ sequence_length)` and `waveform_lengths`, which gives the length of each sample.
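+
+ Example (a minimal usage sketch rather than an official snippet; the checkpoint name, the dummy audio and
+ the `"spa"` target code are illustrative assumptions):
+
+ ```python
+ >>> from transformers import AutoProcessor, SeamlessM4Tv2ForSpeechToSpeech
+
+ >>> # assumed checkpoint name, shown for illustration only
+ >>> processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
+ >>> model = SeamlessM4Tv2ForSpeechToSpeech.from_pretrained("facebook/seamless-m4t-v2-large")
+
+ >>> # one second of silence at 16 kHz stands in for a real utterance
+ >>> inputs = processor(audios=[0.0] * 16000, sampling_rate=16000, return_tensors="pt")
+
+ >>> # un-prefixed kwargs reach both sub-models, `speech_`-prefixed ones only the speech sub-model
+ >>> waveform, waveform_lengths = model.generate(
+ ...     **inputs, tgt_lang="spa", speech_do_sample=True, speech_temperature=0.6
+ ... )
+ ```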
+ """
+ batch_size = len(input_features) if input_features is not None else len(kwargs.get("inputs_embeds"))
+
+ if tgt_lang is None:
+ raise ValueError("You must specify a `tgt_lang` to generate translated speech.")
+ else:
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]:
+ lang_code_to_id = getattr(self.generation_config, key, None)
+ if lang_code_to_id is None:
+ raise ValueError(
+ f"""This model generation config doesn't have a `{key}` key which maps the target language
+ to the right token id. Make sure to load the right generation config."""
+ )
+ elif tgt_lang not in lang_code_to_id:
+ raise ValueError(
+ f"""`tgt_lang={tgt_lang}` is not supported by this model.
+ Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports
+ more languages for text translation than for speech synthesis."""
+ )
+
+ kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
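+ # e.g. the call `.generate(input_features, num_beams=4, speech_do_sample=True)` from the docstring above yields
+ # kwargs_text = {"num_beams": 4} and kwargs_speech = {"num_beams": 4, "do_sample": True}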
+ kwargs_text["output_hidden_states"] = True
+ kwargs_text["return_dict_in_generate"] = True
+ kwargs_text["output_scores"] = True
+
+ text_decoder_input_ids = kwargs_text.get("decoder_input_ids")
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+
+ kwargs_text["decoder_input_ids"] = text_decoder_input_ids
+
+ # first generation
+ text_generation_output = super().generate(input_features, **kwargs_text)
+ sequences = text_generation_output.sequences
+
+ # prepare second generation
+ num_return_sequences = len(sequences) // batch_size
+ attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None))
+
+ # get last_hidden_state from encoder
+ encoder_hidden_states = self.speech_encoder(input_features=input_features, attention_mask=attention_mask)[0]
+
+ # input modality = speech so new attention mask for the decoder
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_hidden_states.device
+ )
+ attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths
+ )
+
+ # repeat attention mask along the batch dimension
+ attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0)
+
+ # repeat encoder hidden states along the batch dimension
+ encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0)
+
+ # get decoder last hidden state - must do a pass through the text decoder
+ t2u_input_embeds = self.text_decoder(
+ input_ids=sequences[:, :-1], # Manually trim the final EOS token
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=attention_mask,
+ ).last_hidden_state
+
+ pad_token_id = self.generation_config.pad_token_id
+
+ # Compute new attention mask
+ seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1)
+ t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
+ kwargs_speech["attention_mask"] = t2u_model_attention_mask
+
+ # REMOVE EOS and lang_id
+ t2u_input_ids = sequences[:, 2:-1]
+ # replace every other EOS
+ t2u_input_ids = torch.masked_fill(
+ t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id
+ )
+
+ # compute t2u_char_input_ids
+ t2u_subwords = self._indices_to_subwords(t2u_input_ids)
+ t2u_char_count_per_id = self._count_character_length_in_subword(
+ t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id
+ )
+
+ # Add pads for lang, EOS tokens as per NLLB "source" tokenizer mode.
+ pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1))
+ t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1)
+ t2u_char_input_ids = self._get_char_input_ids(
+ t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id
+ )
+
+ # second pass
+ t2u_output = self.t2u_model(
+ inputs_embeds=t2u_input_embeds,
+ char_input_ids=t2u_char_input_ids,
+ char_count_per_id=t2u_char_count_per_id,
+ **kwargs_speech,
+ )
+
+ t2u_logits = t2u_output[0]
+ padding_mask = t2u_output[1].bool()
+
+ # The text-to-unit model is non-autoregressive. We keep the ability to use sampling with temperature
+ temperature = kwargs_speech.get("temperature", None)
+ if (temperature is None or temperature == 1.0) or not kwargs_speech.get("do_sample", False):
+ unit_ids = t2u_logits.argmax(dim=-1)
+ else:
+ t2u_logits = t2u_logits / temperature
+ # apply softmax
+ probs = nn.functional.softmax(t2u_logits, dim=-1)
+ # reshape to 2D: (batch_size, seq_len, t2u_vocab_size) -> (batch_size*seq_len, t2u_vocab_size)
+ probs = probs.reshape((-1, probs.shape[2]))
+ # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len)
+ unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1)
+
+ output_unit_ids = unit_ids.detach().clone()
+
+ replace_mask = (unit_ids == self.config.t2u_eos_token_id) | (~padding_mask)
+ # replace eos tokens with pad tokens
+ unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id)
+
+ # remove the control-symbol offset so that unit ids match the vocoder vocabulary
+ unit_ids = torch.where(
+ unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset
+ )
+
+ vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
+ vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device)
+
+ speaker_id = torch.tensor([[speaker_id]] * len(unit_ids)).to(self.device)
+
+ waveform, waveform_lengths = self.vocoder(
+ input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id
+ )
+
+ if return_intermediate_token_ids:
+ return SeamlessM4Tv2GenerationOutput(
+ waveform=waveform,
+ waveform_lengths=waveform_lengths,
+ sequences=sequences,
+ unit_sequences=output_unit_ids,
+ )
+
+ return waveform, waveform_lengths
+
+ @staticmethod
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+
+@add_start_docstrings(
+ "The original SeamlessM4Tv2 Model transformer which can be used for every tasks available (S2ST, S2TT, T2TT, T2ST).",
+ SEAMLESS_M4T_V2_START_DOCSTRING,
+ """
+ current_modality (`str`, *optional*, defaults to `"text"`):
+ Default modality. Used only to initialize the model. It can be set to `"text"` or `"speech"`.
+ This will be updated automatically according to the inputs passed to the forward and generate methods (`input_ids` for text and `input_features` for audio).
+ """,
+)
+class SeamlessM4Tv2Model(SeamlessM4Tv2PreTrainedModel):
+ _tied_weights_keys = [
+ "lm_head.weight",
+ "text_encoder.embed_tokens.weight",
+ "text_decoder.embed_tokens.weight",
+ ]
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.__init__ with SeamlessM4T->SeamlessM4Tv2
+ def __init__(self, config, current_modality="text"):
+ super().__init__(config)
+
+ self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+
+ self.text_encoder = SeamlessM4Tv2Encoder(config, self.shared)
+ self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config)
+ self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.current_modality = current_modality
+ if current_modality == "speech":
+ self.main_input_name = "input_features"
+
+ # these models already call post_init in their initialization
+ self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config)
+ self.vocoder = SeamlessM4Tv2CodeHifiGan(config)
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.set_modality
+ def set_modality(self, modality="text"):
+ if modality == "text":
+ self.main_input_name = "input_ids"
+ self.current_modality = "text"
+ elif modality == "speech":
+ self.main_input_name = "input_features"
+ self.current_modality = "speech"
+ else:
+ raise ValueError(f"`modality={modality}` is not a valid modality. It must be `text` or `speech`.")
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.get_encoder
+ def get_encoder(self):
+ if self.current_modality == "text":
+ return self.text_encoder
+ else:
+ return self.speech_encoder
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.get_output_embeddings
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.set_output_embeddings
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.get_input_embeddings
+ def get_input_embeddings(self):
+ return self.text_decoder.embed_tokens
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.set_input_embeddings
+ def set_input_embeddings(self, value):
+ self.text_encoder.embed_tokens = value
+ self.text_decoder.embed_tokens = value
+ self.shared = value
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel._tie_weights
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared)
+ self._tie_or_clone_weights(self.lm_head, self.shared)
+
+ @add_start_docstrings_to_model_forward(M4T_MODEL_INPUTS_DOCSTRING)
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.forward with SeamlessM4T->SeamlessM4Tv2
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ input_features: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ if input_ids is None and input_features is None and inputs_embeds is None and encoder_outputs is None:
+ raise ValueError(
+ "`input_ids`,`input_features`, `inputs_embeds` and `encoder_outputs` are all empty. Make sure at least one of them is not."
+ )
+ elif input_features is not None:
+ if input_ids is not None:
+ logger.warning(
+ "`input_ids` is not `None` but `input_features` has been given."
+ "`input_features` will be used in priority through the `speech_encoder`. "
+ "Make sure that `input_features` and `input_ids` are mutually exclusive."
+ )
+
+ if inputs_embeds is not None:
+ logger.warning(
+ "`inputs_embeds` is not `None` but `input_features` has been given."
+ "`input_features` will be used in priority through `speech_encoder`. "
+ "`inputs_embeds` will be ignored."
+ )
+
+ # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
+ logger.warning(
+ "This calls the same method `forward` as `SeamlessM4Tv2ForTextToText` and `SeamlessM4Tv2ForSpeechToText`"
+ "depending on the input modality. If you want to generate speech, use the `generate` method."
+ )
+
+ self.set_modality("speech")
+
+ encoder_outputs = self.speech_encoder(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ elif input_ids is not None or inputs_embeds is not None:
+ # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
+ logger.warning(
+ "This calls the same method `forward` as `SeamlessM4Tv2ForTextToText` and `SeamlessM4Tv2ForSpeechToText`"
+ "depending on the input modality. If you want to generate speech, use the `generate` method."
+ )
+ self.set_modality("text")
+ encoder_outputs = self.text_encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ encoder_attention_mask = attention_mask
+ # input modality = speech so new attention mask
+ if self.current_modality == "speech" and attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_outputs[0].device
+ )
+ encoder_attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.text_decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ lm_logits = self.lm_head(decoder_outputs[0])
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ labels = labels.to(lm_logits.device)
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ outputs = decoder_outputs + encoder_outputs
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ input_features: Optional[torch.Tensor] = None,
+ return_intermediate_token_ids: Optional[bool] = None,
+ tgt_lang: Optional[str] = None,
+ speaker_id: Optional[int] = 0,
+ generate_speech: Optional[bool] = True,
+ **kwargs,
+ ) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]:
+ """
+ Generates translated token ids and/or translated audio waveforms.
+
+ This method successively calls the `.generate` function of two different sub-models. You can specify keyword
+ arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
+ that will be passed to one of them.
+
+ For example, calling `.generate(input_ids=input_ids, num_beams=4, speech_do_sample=True)` will successively
+ perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
+
+ For an overview of generation strategies and code examples, check out the [following
+ guide](./generation_strategies).
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`, *optional*):
+ Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
+ [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
+ return_intermediate_token_ids (`bool`, *optional*):
+ If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
+ to get translated text alongside the audio. Note that if `generate_speech=False`, this parameter will be
+ ignored.
+ tgt_lang (`str`, *optional*):
+ The language to use as target language for translation.
+ speaker_id (`int`, *optional*, defaults to 0):
+ The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
+ generate_speech (`bool`, *optional*, defaults to `True`):
+ If `False`, will only return the text tokens and won't generate speech.
+
+ kwargs (*optional*):
+ Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
+ arguments are of two types:
+
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
+ except for `decoder_input_ids` which will only be passed through the text components.
+ - With a *text_* or *speech_* prefix, they will be passed to the `generate` method of the
+ text model and speech model respectively. These take priority over the keywords without a prefix.
+
+ This means you can, for example, specify a generation strategy for one generation but not for the
+ other.
+
+ Returns:
+ `Union[SeamlessM4Tv2GenerationOutput, Tuple[Tensor], ModelOutput]`:
+ - If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
+ - If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of
+ shape `(batch_size, sequence_length)` and `waveform_lengths`, which gives the length of each sample.
+ - If `generate_speech=False`, it returns a `ModelOutput`.
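+
+ Example (a minimal usage sketch rather than an official snippet; the checkpoint name and the `"eng"`/`"fra"`
+ language codes are illustrative assumptions):
+
+ ```python
+ >>> from transformers import AutoProcessor, SeamlessM4Tv2Model
+
+ >>> # assumed checkpoint name, shown for illustration only
+ >>> processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
+ >>> model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
+
+ >>> inputs = processor(text="Hello, how are you?", src_lang="eng", return_tensors="pt")
+
+ >>> # T2TT: skip the speech pipeline and get the text generation output back
+ >>> text_output = model.generate(**inputs, tgt_lang="fra", generate_speech=False)
+ >>> translated_ids = text_output.sequences
+
+ >>> # T2ST: returns `(waveform, waveform_lengths)` unless `return_intermediate_token_ids=True`
+ >>> waveform, waveform_lengths = model.generate(**inputs, tgt_lang="fra")
+ ```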
+ """
+ if input_ids is None and input_features is None and kwargs.get("inputs_embeds", None) is None:
+ raise ValueError(
+ "`input_ids`,`input_features` and `inputs_embeds` are all empty. Make sure at least one of them is not."
+ )
+
+ if generate_speech and tgt_lang is None:
+ raise ValueError("You must specify a `tgt_lang` to generate translated speech.")
+
+ if tgt_lang is not None:
+ # also accept __xxx__
+ tgt_lang = tgt_lang.replace("__", "")
+ if generate_speech:
+ keys_to_check = ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]
+ else:
+ keys_to_check = ["text_decoder_lang_to_code_id"]
+ for key in keys_to_check:
+ lang_code_to_id = getattr(self.generation_config, key, None)
+ if lang_code_to_id is None:
+ raise ValueError(
+ f"""This model generation config doesn't have a `{key}` key which maps the target language
+ to the right token id. Make sure to load the right generation config."""
+ )
+ elif tgt_lang not in lang_code_to_id:
+ raise ValueError(
+ f"""`tgt_lang={tgt_lang}` is not supported by this model.
+ Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports
+ more languages for text translation than for speech synthesis."""
+ )
+
+ batch_size = (
+ len(input_features)
+ if input_features is not None
+ else (len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds")))
+ )
+
+ kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
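+ # e.g. `text_num_beams=4` becomes `num_beams=4` in kwargs_text only, whereas un-prefixed kwargs
+ # are copied into both kwargs_text and kwargs_speech (prefixed values take priority)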
+ kwargs_text["output_hidden_states"] = True
+ kwargs_text["return_dict_in_generate"] = True
+ kwargs_text["output_scores"] = True
+
+ text_decoder_input_ids = kwargs_text.get("decoder_input_ids")
+ # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
+ if tgt_lang is not None:
+ # tgt_lang gets priority over decoder input ids
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
+ text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device)
+
+ kwargs_text["decoder_input_ids"] = text_decoder_input_ids
+
+ # first generation
+ if input_features is not None:
+ self.set_modality("speech")
+ if input_ids is not None:
+ logger.warning(
+ "`input_features` and `input_ids` are both non empty. `input_features` will be used in priority "
+ "through the speech encoder. Make sure `input_features=None` if you want to use the text encoder."
+ )
+ text_generation_output = super().generate(input_features=input_features, **kwargs_text)
+ else:
+ self.set_modality("text")
+ text_generation_output = super().generate(input_ids=input_ids, input_features=None, **kwargs_text)
+ sequences = text_generation_output.sequences
+
+ if not generate_speech:
+ return text_generation_output
+
+ # prepare second generation
+ num_return_sequences = len(sequences) // batch_size
+ attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None))
+
+ # get encoder last hidden states
+ if self.current_modality == "speech":
+ # get last_hidden_state from encoder - must do a pass through the speech encoder
+ encoder_hidden_states = self.speech_encoder(
+ input_features=input_features, attention_mask=attention_mask
+ ).last_hidden_state
+
+ # input modality = speech so new attention mask for the decoder
+ if attention_mask is not None:
+ sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
+ encoder_hidden_states.device
+ )
+ attention_mask = _compute_new_attention_mask(
+ hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths
+ )
+ else:
+ encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
+
+ if attention_mask is not None:
+ # repeat attention mask along the batch dimension
+ attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0)
+
+ # repeat encoder hidden states along the batch dimension
+ encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0)
+
+ # get decoder last hidden state - must do a pass through the text decoder
+ t2u_input_embeds = self.text_decoder(
+ input_ids=sequences[:, :-1], # Manually trim the final EOS token
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=attention_mask,
+ ).last_hidden_state
+
+ pad_token_id = self.generation_config.pad_token_id
+
+ # Compute new attention mask
+ seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1)
+ t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
+ kwargs_speech["attention_mask"] = t2u_model_attention_mask
+
+ # REMOVE EOS and lang_id
+ t2u_input_ids = sequences[:, 2:-1]
+ # replace every other EOS
+ t2u_input_ids = torch.masked_fill(
+ t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id
+ )
+
+ # compute t2u_char_input_ids
+ t2u_subwords = self._indices_to_subwords(t2u_input_ids)
+ t2u_char_count_per_id = self._count_character_length_in_subword(
+ t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id
+ )
+
+ # Add pads for lang, EOS tokens as per NLLB "source" tokenizer mode.
+ pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1))
+ t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1)
+ t2u_char_input_ids = self._get_char_input_ids(
+ t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id
+ )
+
+ # second pass
+ t2u_output = self.t2u_model(
+ inputs_embeds=t2u_input_embeds,
+ char_input_ids=t2u_char_input_ids,
+ char_count_per_id=t2u_char_count_per_id,
+ **kwargs_speech,
+ )
+
+ t2u_logits = t2u_output[0]
+ padding_mask = t2u_output[1].bool()
+
+ # The text-to-unit model is non-autoregressive. We keep the ability to use sampling with temperature
+ temperature = kwargs_speech.get("temperature", None)
+ if (temperature is None or temperature == 1.0) or not kwargs_speech.get("do_sample", False):
+ unit_ids = t2u_logits.argmax(dim=-1)
+ else:
+ t2u_logits = t2u_logits / temperature
+ # apply softmax
+ probs = nn.functional.softmax(t2u_logits, dim=-1)
+ # reshape to 2D: (batch_size, seq_len, t2u_vocab_size) -> (batch_size*seq_len, t2u_vocab_size)
+ probs = probs.reshape((-1, probs.shape[2]))
+ # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len)
+ unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1)
+
+ output_unit_ids = unit_ids.detach().clone()
+
+ replace_mask = (unit_ids == self.config.t2u_eos_token_id) | (~padding_mask)
+ # replace eos tokens with pad tokens
+ unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id)
+
+ # remove the control-symbol offset so that unit ids match the vocoder vocabulary
+ unit_ids = torch.where(
+ unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset
+ )
+
+ vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
+ vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device)
+
+ speaker_id = torch.tensor([[speaker_id]] * len(unit_ids)).to(self.device)
+
+ waveform, waveform_lengths = self.vocoder(
+ input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id
+ )
+
+ if return_intermediate_token_ids:
+ return SeamlessM4Tv2GenerationOutput(
+ waveform=waveform,
+ waveform_lengths=waveform_lengths,
+ sequences=sequences,
+ unit_sequences=output_unit_ids,
+ )
+
+ return waveform, waveform_lengths
+
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel._reorder_cache
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
+ )
+ return reordered_past
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c28aea75e048a65568c7cab109b792895364ba77
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/image_processing_superpoint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/image_processing_superpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb88504c3a11c43f975f99bc1f64d59f6df7ed61
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/transformers/models/superpoint/__pycache__/image_processing_superpoint.cpython-310.pyc differ