diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/_archive_maps.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/_archive_maps.py
new file mode 100644
index 0000000000000000000000000000000000000000..256813e0883f4540a391f9c59d2bd121ed8a5356
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/_archive_maps.py
@@ -0,0 +1,2774 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import OrderedDict
+
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class DeprecatedDict(dict):
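+ """Dict that emits a one-time deprecation warning when one of its entries is looked up."""
+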
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def __getitem__(self, item):
+ logger.warning_once(
+ "Archive maps are deprecated and will be removed in version v4.40.0 as they are no longer relevant. "
+ "If looking to get all checkpoints for a given architecture, we recommend using `huggingface_hub` "
+ "with the `list_models` method."
+ )
+ return super().__getitem__(item)
+
+
+class DeprecatedList(list):
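+ """List that emits a one-time deprecation warning when one of its items is accessed."""
+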
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def __getitem__(self, item):
+ logger.warning_once(
+ "Archive maps are deprecated and will be removed in version v4.40.0 as they are no longer relevant. "
+ "If looking to get all checkpoints for a given architecture, we recommend using `huggingface_hub` "
+ "with the `list_models` method."
+ )
+ return super().__getitem__(item)
+
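+# Illustrative usage (not part of the public API): indexing any of the wrappers defined
+# below emits the one-time deprecation warning before returning the stored value, e.g.
+#   url = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP["google-bert/bert-base-uncased"]
+#   name = BERT_PRETRAINED_MODEL_ARCHIVE_LIST[0]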
+
+ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "albert/albert-base-v1": "https://huggingface.co/albert/albert-base-v1/resolve/main/config.json",
+ "albert/albert-large-v1": "https://huggingface.co/albert/albert-large-v1/resolve/main/config.json",
+ "albert/albert-xlarge-v1": "https://huggingface.co/albert/albert-xlarge-v1/resolve/main/config.json",
+ "albert/albert-xxlarge-v1": "https://huggingface.co/albert/albert-xxlarge-v1/resolve/main/config.json",
+ "albert/albert-base-v2": "https://huggingface.co/albert/albert-base-v2/resolve/main/config.json",
+ "albert/albert-large-v2": "https://huggingface.co/albert/albert-large-v2/resolve/main/config.json",
+ "albert/albert-xlarge-v2": "https://huggingface.co/albert/albert-xlarge-v2/resolve/main/config.json",
+ "albert/albert-xxlarge-v2": "https://huggingface.co/albert/albert-xxlarge-v2/resolve/main/config.json",
+ }
+)
+
+ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "albert/albert-base-v1",
+ "albert/albert-large-v1",
+ "albert/albert-xlarge-v1",
+ "albert/albert-xxlarge-v1",
+ "albert/albert-base-v2",
+ "albert/albert-large-v2",
+ "albert/albert-xlarge-v2",
+ "albert/albert-xxlarge-v2",
+ ]
+)
+
+TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "albert/albert-base-v1",
+ "albert/albert-large-v1",
+ "albert/albert-xlarge-v1",
+ "albert/albert-xxlarge-v1",
+ "albert/albert-base-v2",
+ "albert/albert-large-v2",
+ "albert/albert-xlarge-v2",
+ "albert/albert-xxlarge-v2",
+ ]
+)
+
+ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json"}
+)
+
+ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["kakaobrain/align-base"])
+
+ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json"}
+)
+
+ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["BAAI/AltCLIP"])
+
+AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "MIT/ast-finetuned-audioset-10-10-0.4593": "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
+ }
+)
+
+AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["MIT/ast-finetuned-audioset-10-10-0.4593"]
+)
+
+AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json"
+ }
+)
+
+AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["huggingface/autoformer-tourism-monthly"])
+
+BARK_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["suno/bark-small", "suno/bark"])
+
+BART_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/bart-large"])
+
+BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/beit-base-patch16-224-pt22k": "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
+ }
+)
+
+BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/beit-base-patch16-224"])
+
+BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google-bert/bert-base-uncased": "https://huggingface.co/google-bert/bert-base-uncased/resolve/main/config.json",
+ "google-bert/bert-large-uncased": "https://huggingface.co/google-bert/bert-large-uncased/resolve/main/config.json",
+ "google-bert/bert-base-cased": "https://huggingface.co/google-bert/bert-base-cased/resolve/main/config.json",
+ "google-bert/bert-large-cased": "https://huggingface.co/google-bert/bert-large-cased/resolve/main/config.json",
+ "google-bert/bert-base-multilingual-uncased": "https://huggingface.co/google-bert/bert-base-multilingual-uncased/resolve/main/config.json",
+ "google-bert/bert-base-multilingual-cased": "https://huggingface.co/google-bert/bert-base-multilingual-cased/resolve/main/config.json",
+ "google-bert/bert-base-chinese": "https://huggingface.co/google-bert/bert-base-chinese/resolve/main/config.json",
+ "google-bert/bert-base-german-cased": "https://huggingface.co/google-bert/bert-base-german-cased/resolve/main/config.json",
+ "google-bert/bert-large-uncased-whole-word-masking": "https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking/resolve/main/config.json",
+ "google-bert/bert-large-cased-whole-word-masking": "https://huggingface.co/google-bert/bert-large-cased-whole-word-masking/resolve/main/config.json",
+ "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json",
+ "google-bert/bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/google-bert/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json",
+ "google-bert/bert-base-cased-finetuned-mrpc": "https://huggingface.co/google-bert/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
+ "google-bert/bert-base-german-dbmdz-cased": "https://huggingface.co/google-bert/bert-base-german-dbmdz-cased/resolve/main/config.json",
+ "google-bert/bert-base-german-dbmdz-uncased": "https://huggingface.co/google-bert/bert-base-german-dbmdz-uncased/resolve/main/config.json",
+ "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
+ "cl-tohoku/bert-base-japanese-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json",
+ "cl-tohoku/bert-base-japanese-char": "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json",
+ "cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json",
+ "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json",
+ "TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json",
+ "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
+ }
+)
+
+BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google-bert/bert-base-uncased",
+ "google-bert/bert-large-uncased",
+ "google-bert/bert-base-cased",
+ "google-bert/bert-large-cased",
+ "google-bert/bert-base-multilingual-uncased",
+ "google-bert/bert-base-multilingual-cased",
+ "google-bert/bert-base-chinese",
+ "google-bert/bert-base-german-cased",
+ "google-bert/bert-large-uncased-whole-word-masking",
+ "google-bert/bert-large-cased-whole-word-masking",
+ "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad",
+ "google-bert/bert-large-cased-whole-word-masking-finetuned-squad",
+ "google-bert/bert-base-cased-finetuned-mrpc",
+ "google-bert/bert-base-german-dbmdz-cased",
+ "google-bert/bert-base-german-dbmdz-uncased",
+ "cl-tohoku/bert-base-japanese",
+ "cl-tohoku/bert-base-japanese-whole-word-masking",
+ "cl-tohoku/bert-base-japanese-char",
+ "cl-tohoku/bert-base-japanese-char-whole-word-masking",
+ "TurkuNLP/bert-base-finnish-cased-v1",
+ "TurkuNLP/bert-base-finnish-uncased-v1",
+ "wietsedv/bert-base-dutch-cased",
+ ]
+)
+
+TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google-bert/bert-base-uncased",
+ "google-bert/bert-large-uncased",
+ "google-bert/bert-base-cased",
+ "google-bert/bert-large-cased",
+ "google-bert/bert-base-multilingual-uncased",
+ "google-bert/bert-base-multilingual-cased",
+ "google-bert/bert-base-chinese",
+ "google-bert/bert-base-german-cased",
+ "google-bert/bert-large-uncased-whole-word-masking",
+ "google-bert/bert-large-cased-whole-word-masking",
+ "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad",
+ "google-bert/bert-large-cased-whole-word-masking-finetuned-squad",
+ "google-bert/bert-base-cased-finetuned-mrpc",
+ "cl-tohoku/bert-base-japanese",
+ "cl-tohoku/bert-base-japanese-whole-word-masking",
+ "cl-tohoku/bert-base-japanese-char",
+ "cl-tohoku/bert-base-japanese-char-whole-word-masking",
+ "TurkuNLP/bert-base-finnish-cased-v1",
+ "TurkuNLP/bert-base-finnish-uncased-v1",
+ "wietsedv/bert-base-dutch-cased",
+ ]
+)
+
+BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
+ "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
+ "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
+ }
+)
+
+BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["google/bigbird-roberta-base", "google/bigbird-roberta-large", "google/bigbird-base-trivia-itc"]
+)
+
+BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/bigbird-pegasus-large-arxiv": "https://huggingface.co/google/bigbird-pegasus-large-arxiv/resolve/main/config.json",
+ "google/bigbird-pegasus-large-pubmed": "https://huggingface.co/google/bigbird-pegasus-large-pubmed/resolve/main/config.json",
+ "google/bigbird-pegasus-large-bigpatent": "https://huggingface.co/google/bigbird-pegasus-large-bigpatent/resolve/main/config.json",
+ }
+)
+
+BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/bigbird-pegasus-large-arxiv",
+ "google/bigbird-pegasus-large-pubmed",
+ "google/bigbird-pegasus-large-bigpatent",
+ ]
+)
+
+BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json"}
+)
+
+BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/biogpt", "microsoft/BioGPT-Large"])
+
+BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json"}
+)
+
+BIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/bit-50"])
+
+BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/config.json"}
+)
+
+BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/blenderbot-3B"])
+
+BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
+ # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
+ }
+)
+
+BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/blenderbot_small-90M"])
+
+BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
+ "Salesforce/blip-vqa-capfit-large": "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json",
+ "Salesforce/blip-image-captioning-base": "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json",
+ "Salesforce/blip-image-captioning-large": "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json",
+ "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
+ "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
+ "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
+ "Salesforce/blip-itm-large-flikr": "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json",
+ }
+)
+
+BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "Salesforce/blip-vqa-base",
+ "Salesforce/blip-vqa-capfilt-large",
+ "Salesforce/blip-image-captioning-base",
+ "Salesforce/blip-image-captioning-large",
+ "Salesforce/blip-itm-base-coco",
+ "Salesforce/blip-itm-large-coco",
+ "Salesforce/blip-itm-base-flickr",
+ "Salesforce/blip-itm-large-flickr",
+ ]
+)
+
+TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "Salesforce/blip-vqa-base",
+ "Salesforce/blip-vqa-capfilt-large",
+ "Salesforce/blip-image-captioning-base",
+ "Salesforce/blip-image-captioning-large",
+ "Salesforce/blip-itm-base-coco",
+ "Salesforce/blip-itm-large-coco",
+ "Salesforce/blip-itm-base-flickr",
+ "Salesforce/blip-itm-large-flickr",
+ ]
+)
+
+BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json"}
+)
+
+BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Salesforce/blip2-opt-2.7b"])
+
+BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
+ "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
+ "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
+ "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
+ "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
+ "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
+ }
+)
+
+BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "bigscience/bigscience-small-testing",
+ "bigscience/bloom-560m",
+ "bigscience/bloom-1b1",
+ "bigscience/bloom-1b7",
+ "bigscience/bloom-3b",
+ "bigscience/bloom-7b1",
+ "bigscience/bloom",
+ ]
+)
+
+BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
+ "BridgeTower/bridgetower-base-itm-mlm": "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json",
+ }
+)
+
+BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["BridgeTower/bridgetower-base", "BridgeTower/bridgetower-base-itm-mlm"]
+)
+
+BROS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "jinho8345/bros-base-uncased": "https://huggingface.co/jinho8345/bros-base-uncased/blob/main/config.json",
+ "jinho8345/bros-large-uncased": "https://huggingface.co/jinho8345/bros-large-uncased/blob/main/config.json",
+ }
+)
+
+BROS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["jinho8345/bros-base-uncased", "jinho8345/bros-large-uncased"])
+
+CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "almanach/camembert-base": "https://huggingface.co/almanach/camembert-base/resolve/main/config.json",
+ "umberto-commoncrawl-cased-v1": "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json",
+ "umberto-wikipedia-uncased-v1": "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json",
+ }
+)
+
+CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["almanach/camembert-base", "Musixmatch/umberto-commoncrawl-cased-v1", "Musixmatch/umberto-wikipedia-uncased-v1"]
+)
+
+TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList([])
+
+CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json"}
+)
+
+CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/canine-s", "google/canine-r"])
+
+CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "OFA-Sys/chinese-clip-vit-base-patch16": "https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/resolve/main/config.json"
+ }
+)
+
+CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["OFA-Sys/chinese-clip-vit-base-patch16"])
+
+CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["laion/clap-htsat-fused", "laion/clap-htsat-unfused"])
+
+CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/config.json"}
+)
+
+CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/clip-vit-base-patch32"])
+
+TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/clip-vit-base-patch32"])
+
+CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"CIDAS/clipseg-rd64": "https://huggingface.co/CIDAS/clipseg-rd64/resolve/main/config.json"}
+)
+
+CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["CIDAS/clipseg-rd64-refined"])
+
+CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"susnato/clvp_dev": "https://huggingface.co/susnato/clvp_dev/resolve/main/config.json"}
+)
+
+CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["susnato/clvp_dev"])
+
+CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
+ "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
+ "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
+ "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
+ "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
+ "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
+ "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
+ "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
+ "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
+ "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
+ "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
+ "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
+ }
+)
+
+CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "Salesforce/codegen-350M-nl",
+ "Salesforce/codegen-350M-multi",
+ "Salesforce/codegen-350M-mono",
+ "Salesforce/codegen-2B-nl",
+ "Salesforce/codegen-2B-multi",
+ "Salesforce/codegen-2B-mono",
+ "Salesforce/codegen-6B-nl",
+ "Salesforce/codegen-6B-multi",
+ "Salesforce/codegen-6B-mono",
+ "Salesforce/codegen-16B-nl",
+ "Salesforce/codegen-16B-multi",
+ "Salesforce/codegen-16B-mono",
+ ]
+)
+
+CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/conditional-detr-resnet-50": "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
+ }
+)
+
+CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/conditional-detr-resnet-50"])
+
+CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
+ "YituTech/conv-bert-medium-small": "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json",
+ "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
+ }
+)
+
+CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["YituTech/conv-bert-base", "YituTech/conv-bert-medium-small", "YituTech/conv-bert-small"]
+)
+
+TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["YituTech/conv-bert-base", "YituTech/conv-bert-medium-small", "YituTech/conv-bert-small"]
+)
+
+CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/convnext-tiny-224": "https://huggingface.co/facebook/convnext-tiny-224/resolve/main/config.json"}
+)
+
+CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/convnext-tiny-224"])
+
+CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json"
+ }
+)
+
+CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/convnextv2-tiny-1k-224"])
+
+CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/config.json"}
+)
+
+CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openbmb/cpm-ant-10b"])
+
+CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"Salesforce/ctrl": "https://huggingface.co/Salesforce/ctrl/resolve/main/config.json"}
+)
+
+CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Salesforce/ctrl"])
+
+TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Salesforce/ctrl"])
+
+CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json"}
+)
+
+CVT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "microsoft/cvt-13",
+ "microsoft/cvt-13-384",
+ "microsoft/cvt-13-384-22k",
+ "microsoft/cvt-21",
+ "microsoft/cvt-21-384",
+ "microsoft/cvt-21-384-22k",
+ ]
+)
+
+TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "microsoft/cvt-13",
+ "microsoft/cvt-13-384",
+ "microsoft/cvt-13-384-22k",
+ "microsoft/cvt-21",
+ "microsoft/cvt-21-384",
+ "microsoft/cvt-21-384-22k",
+ ]
+)
+
+DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json"}
+)
+
+DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/data2vec-vision-base-ft": "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
+ }
+)
+
+DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "facebook/data2vec-audio-base",
+ "facebook/data2vec-audio-base-10m",
+ "facebook/data2vec-audio-base-100h",
+ "facebook/data2vec-audio-base-960h",
+ ]
+)
+
+DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/data2vec-text-base"])
+
+DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/data2vec-vision-base-ft1k"])
+
+DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/deberta-base": "https://huggingface.co/microsoft/deberta-base/resolve/main/config.json",
+ "microsoft/deberta-large": "https://huggingface.co/microsoft/deberta-large/resolve/main/config.json",
+ "microsoft/deberta-xlarge": "https://huggingface.co/microsoft/deberta-xlarge/resolve/main/config.json",
+ "microsoft/deberta-base-mnli": "https://huggingface.co/microsoft/deberta-base-mnli/resolve/main/config.json",
+ "microsoft/deberta-large-mnli": "https://huggingface.co/microsoft/deberta-large-mnli/resolve/main/config.json",
+ "microsoft/deberta-xlarge-mnli": "https://huggingface.co/microsoft/deberta-xlarge-mnli/resolve/main/config.json",
+ }
+)
+
+DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "microsoft/deberta-base",
+ "microsoft/deberta-large",
+ "microsoft/deberta-xlarge",
+ "microsoft/deberta-base-mnli",
+ "microsoft/deberta-large-mnli",
+ "microsoft/deberta-xlarge-mnli",
+ ]
+)
+
+TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["kamalkraj/deberta-base"])
+
+DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
+ "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
+ "microsoft/deberta-v2-xlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json",
+ "microsoft/deberta-v2-xxlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json",
+ }
+)
+
+DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "microsoft/deberta-v2-xlarge",
+ "microsoft/deberta-v2-xxlarge",
+ "microsoft/deberta-v2-xlarge-mnli",
+ "microsoft/deberta-v2-xxlarge-mnli",
+ ]
+)
+
+TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["kamalkraj/deberta-v2-xlarge"])
+
+DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "edbeeching/decision-transformer-gym-hopper-medium": "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
+ }
+)
+
+DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["edbeeching/decision-transformer-gym-hopper-medium"]
+)
+
+DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json"}
+)
+
+DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["SenseTime/deformable-detr"])
+
+DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/deit-base-distilled-patch16-224": "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
+ }
+)
+
+DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/deit-base-distilled-patch16-224"])
+
+TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/deit-base-distilled-patch16-224"])
+
+MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json"}
+)
+
+MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["speechbrain/m-ctc-t-large"])
+
+OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json"}
+)
+
+RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "yjernite/retribert-base-uncased": "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
+ }
+)
+
+RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["yjernite/retribert-base-uncased"])
+
+TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
+ }
+)
+
+TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["CarlCochet/trajectory-transformer-halfcheetah-medium-v2"]
+)
+
+TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"transfo-xl/transfo-xl-wt103": "https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/config.json"}
+)
+
+TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["transfo-xl/transfo-xl-wt103"])
+
+TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["transfo-xl/transfo-xl-wt103"])
+
+VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "Visual-Attention-Network/van-base": "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
+ }
+)
+
+VAN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Visual-Attention-Network/van-base"])
+
+DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "LiheYoung/depth-anything-small-hf": "https://huggingface.co/LiheYoung/depth-anything-small-hf/resolve/main/config.json"
+ }
+)
+
+DEPTH_ANYTHING_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["LiheYoung/depth-anything-small-hf"])
+
+DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json"}
+)
+
+DETA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["jozhang97/deta-swin-large-o365"])
+
+DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json"}
+)
+
+DETR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/detr-resnet-50"])
+
+DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json"}
+)
+
+DINAT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["shi-labs/dinat-mini-in1k-224"])
+
+DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/dinov2-base": "https://huggingface.co/facebook/dinov2-base/resolve/main/config.json"}
+)
+
+DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/dinov2-base"])
+
+DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
+ "distilbert-base-uncased-distilled-squad": "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json",
+ "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
+ "distilbert-base-cased-distilled-squad": "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json",
+ "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
+ "distilbert-base-multilingual-cased": "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json",
+ "distilbert-base-uncased-finetuned-sst-2-english": "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json",
+ }
+)
+
+DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "distilbert-base-uncased",
+ "distilbert-base-uncased-distilled-squad",
+ "distilbert-base-cased",
+ "distilbert-base-cased-distilled-squad",
+ "distilbert-base-german-cased",
+ "distilbert-base-multilingual-cased",
+ "distilbert-base-uncased-finetuned-sst-2-english",
+ ]
+)
+
+TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "distilbert-base-uncased",
+ "distilbert-base-uncased-distilled-squad",
+ "distilbert-base-cased",
+ "distilbert-base-cased-distilled-squad",
+ "distilbert-base-multilingual-cased",
+ "distilbert-base-uncased-finetuned-sst-2-english",
+ ]
+)
+
+DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json"}
+)
+
+DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["naver-clova-ix/donut-base"])
+
+DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/dpr-ctx_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json",
+ "facebook/dpr-question_encoder-single-nq-base": "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json",
+ "facebook/dpr-reader-single-nq-base": "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json",
+ "facebook/dpr-ctx_encoder-multiset-base": "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json",
+ "facebook/dpr-question_encoder-multiset-base": "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json",
+ "facebook/dpr-reader-multiset-base": "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json",
+ }
+)
+
+DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["facebook/dpr-ctx_encoder-single-nq-base", "facebook/dpr-ctx_encoder-multiset-base"]
+)
+
+DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["facebook/dpr-question_encoder-single-nq-base", "facebook/dpr-question_encoder-multiset-base"]
+)
+
+DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["facebook/dpr-reader-single-nq-base", "facebook/dpr-reader-multiset-base"]
+)
+
+TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["facebook/dpr-ctx_encoder-single-nq-base", "facebook/dpr-ctx_encoder-multiset-base"]
+)
+
+TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["facebook/dpr-question_encoder-single-nq-base", "facebook/dpr-question_encoder-multiset-base"]
+)
+
+TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["facebook/dpr-reader-single-nq-base", "facebook/dpr-reader-multiset-base"]
+)
+
+DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json"}
+)
+
+DPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Intel/dpt-large", "Intel/dpt-hybrid-midas"])
+
+EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "snap-research/efficientformer-l1-300": "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
+ }
+)
+
+EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["snap-research/efficientformer-l1-300"])
+
+TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["snap-research/efficientformer-l1-300"])
+
+EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json"}
+)
+
+EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/efficientnet-b7"])
+
+ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/electra-small-generator": "https://huggingface.co/google/electra-small-generator/resolve/main/config.json",
+ "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/config.json",
+ "google/electra-large-generator": "https://huggingface.co/google/electra-large-generator/resolve/main/config.json",
+ "google/electra-small-discriminator": "https://huggingface.co/google/electra-small-discriminator/resolve/main/config.json",
+ "google/electra-base-discriminator": "https://huggingface.co/google/electra-base-discriminator/resolve/main/config.json",
+ "google/electra-large-discriminator": "https://huggingface.co/google/electra-large-discriminator/resolve/main/config.json",
+ }
+)
+
+ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/electra-small-generator",
+ "google/electra-base-generator",
+ "google/electra-large-generator",
+ "google/electra-small-discriminator",
+ "google/electra-base-discriminator",
+ "google/electra-large-discriminator",
+ ]
+)
+
+TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/electra-small-generator",
+ "google/electra-base-generator",
+ "google/electra-large-generator",
+ "google/electra-small-discriminator",
+ "google/electra-base-discriminator",
+ "google/electra-large-discriminator",
+ ]
+)
+
+ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
+ "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
+ }
+)
+
+ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/encodec_24khz", "facebook/encodec_48khz"])
+
+ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "nghuyong/ernie-1.0-base-zh": "https://huggingface.co/nghuyong/ernie-1.0-base-zh/resolve/main/config.json",
+ "nghuyong/ernie-2.0-base-en": "https://huggingface.co/nghuyong/ernie-2.0-base-en/resolve/main/config.json",
+ "nghuyong/ernie-2.0-large-en": "https://huggingface.co/nghuyong/ernie-2.0-large-en/resolve/main/config.json",
+ "nghuyong/ernie-3.0-base-zh": "https://huggingface.co/nghuyong/ernie-3.0-base-zh/resolve/main/config.json",
+ "nghuyong/ernie-3.0-medium-zh": "https://huggingface.co/nghuyong/ernie-3.0-medium-zh/resolve/main/config.json",
+ "nghuyong/ernie-3.0-mini-zh": "https://huggingface.co/nghuyong/ernie-3.0-mini-zh/resolve/main/config.json",
+ "nghuyong/ernie-3.0-micro-zh": "https://huggingface.co/nghuyong/ernie-3.0-micro-zh/resolve/main/config.json",
+ "nghuyong/ernie-3.0-nano-zh": "https://huggingface.co/nghuyong/ernie-3.0-nano-zh/resolve/main/config.json",
+ "nghuyong/ernie-gram-zh": "https://huggingface.co/nghuyong/ernie-gram-zh/resolve/main/config.json",
+ "nghuyong/ernie-health-zh": "https://huggingface.co/nghuyong/ernie-health-zh/resolve/main/config.json",
+ }
+)
+
+ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "nghuyong/ernie-1.0-base-zh",
+ "nghuyong/ernie-2.0-base-en",
+ "nghuyong/ernie-2.0-large-en",
+ "nghuyong/ernie-3.0-base-zh",
+ "nghuyong/ernie-3.0-medium-zh",
+ "nghuyong/ernie-3.0-mini-zh",
+ "nghuyong/ernie-3.0-micro-zh",
+ "nghuyong/ernie-3.0-nano-zh",
+ "nghuyong/ernie-gram-zh",
+ "nghuyong/ernie-health-zh",
+ ]
+)
+
+ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
+ "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
+ }
+)
+
+ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["susnato/ernie-m-base_pytorch", "susnato/ernie-m-large_pytorch"]
+)
+
+ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json"}
+)
+
+ESM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/esm2_t6_8M_UR50D", "facebook/esm2_t12_35M_UR50D"])
+
+FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
+ "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
+ }
+)
+
+FALCON_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "tiiuae/falcon-40b",
+ "tiiuae/falcon-40b-instruct",
+ "tiiuae/falcon-7b",
+ "tiiuae/falcon-7b-instruct",
+ "tiiuae/falcon-rw-7b",
+ "tiiuae/falcon-rw-1b",
+ ]
+)
+
+FASTSPEECH2_CONFORMER_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "espnet/fastspeech2_conformer_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_hifigan/raw/main/config.json"
+ }
+)
+
+FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"espnet/fastspeech2_conformer": "https://huggingface.co/espnet/fastspeech2_conformer/raw/main/config.json"}
+)
+
+FASTSPEECH2_CONFORMER_WITH_HIFIGAN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "espnet/fastspeech2_conformer_with_hifigan": "https://huggingface.co/espnet/fastspeech2_conformer_with_hifigan/raw/main/config.json"
+ }
+)
+
+FASTSPEECH2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["espnet/fastspeech2_conformer"])
+
+FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "flaubert/flaubert_small_cased": "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/config.json",
+ "flaubert/flaubert_base_uncased": "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/config.json",
+ "flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/config.json",
+ "flaubert/flaubert_large_cased": "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/config.json",
+ }
+)
+
+FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "flaubert/flaubert_small_cased",
+ "flaubert/flaubert_base_uncased",
+ "flaubert/flaubert_base_cased",
+ "flaubert/flaubert_large_cased",
+ ]
+)
+
+TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList([])
+
+FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/flava-full": "https://huggingface.co/facebook/flava-full/resolve/main/config.json"}
+)
+
+FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/flava-full"])
+
+FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
+ "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
+ }
+)
+
+FNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/fnet-base", "google/fnet-large"])
+
+FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json"}
+)
+
+FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/focalnet-tiny"])
+
+FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({})
+
+FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
+ "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
+ "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
+ "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
+ "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json",
+ "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json",
+ "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
+ "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
+ "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
+ "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
+ }
+)
+
+FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "funnel-transformer/small",
+ "funnel-transformer/small-base",
+ "funnel-transformer/medium",
+ "funnel-transformer/medium-base",
+ "funnel-transformer/intermediate",
+ "funnel-transformer/intermediate-base",
+ "funnel-transformer/large",
+ "funnel-transformer/large-base",
+ "funnel-transformer/xlarge-base",
+ "funnel-transformer/xlarge",
+ ]
+)
+
+TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "funnel-transformer/small",
+ "funnel-transformer/small-base",
+ "funnel-transformer/medium",
+ "funnel-transformer/medium-base",
+ "funnel-transformer/intermediate",
+ "funnel-transformer/intermediate-base",
+ "funnel-transformer/large",
+ "funnel-transformer/large-base",
+ "funnel-transformer/xlarge-base",
+ "funnel-transformer/xlarge",
+ ]
+)
+
+FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"adept/fuyu-8b": "https://huggingface.co/adept/fuyu-8b/resolve/main/config.json"}
+)
+
+GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({})
+
+GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json"}
+)
+
+GIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/git-base"])
+
+GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json"}
+)
+
+GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["vinvino02/glpn-kitti"])
+
+GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "openai-community/gpt2": "https://huggingface.co/openai-community/gpt2/resolve/main/config.json",
+ "openai-community/gpt2-medium": "https://huggingface.co/openai-community/gpt2-medium/resolve/main/config.json",
+ "openai-community/gpt2-large": "https://huggingface.co/openai-community/gpt2-large/resolve/main/config.json",
+ "openai-community/gpt2-xl": "https://huggingface.co/openai-community/gpt2-xl/resolve/main/config.json",
+ "distilbert/distilgpt2": "https://huggingface.co/distilbert/distilgpt2/resolve/main/config.json",
+ }
+)
+
+GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "openai-community/gpt2",
+ "openai-community/gpt2-medium",
+ "openai-community/gpt2-large",
+ "openai-community/gpt2-xl",
+ "distilbert/distilgpt2",
+ ]
+)
+
+TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "openai-community/gpt2",
+ "openai-community/gpt2-medium",
+ "openai-community/gpt2-large",
+ "openai-community/gpt2-xl",
+ "distilbert/distilgpt2",
+ ]
+)
+
+GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json"
+ }
+)
+
+GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["bigcode/gpt_bigcode-santacoder"])
+
+GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json"}
+)
+
+GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["EleutherAI/gpt-neo-1.3B"])
+
+GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json"}
+)
+
+GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["EleutherAI/gpt-neox-20b"])
+
+GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json"}
+)
+
+GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json"]
+)
+
+GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json"}
+)
+
+GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["EleutherAI/gpt-j-6B"])
+
+GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "tanreinama/GPTSAN-2.8B-spout_is_uniform": "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
+ }
+)
+
+GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Tanrei/GPTSAN-japanese"])
+
+GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"graphormer-base": "https://huggingface.co/clefourrier/graphormer-base-pcqm4mv2/resolve/main/config.json"}
+)
+
+GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["clefourrier/graphormer-base-pcqm4mv1", "clefourrier/graphormer-base-pcqm4mv2"]
+)
+
+GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"nvidia/groupvit-gcc-yfcc": "https://huggingface.co/nvidia/groupvit-gcc-yfcc/resolve/main/config.json"}
+)
+
+GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["nvidia/groupvit-gcc-yfcc"])
+
+TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["nvidia/groupvit-gcc-yfcc"])
+
+HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/hubert-base-ls960": "https://huggingface.co/facebook/hubert-base-ls960/resolve/main/config.json"}
+)
+
+HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/hubert-base-ls960"])
+
+TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/hubert-base-ls960"])
+
+IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
+ "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
+ "kssteven/ibert-roberta-large-mnli": "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json",
+ }
+)
+
+IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["kssteven/ibert-roberta-base", "kssteven/ibert-roberta-large", "kssteven/ibert-roberta-large-mnli"]
+)
+
+IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "HuggingFaceM4/idefics-9b": "https://huggingface.co/HuggingFaceM4/idefics-9b/blob/main/config.json",
+ "HuggingFaceM4/idefics-80b": "https://huggingface.co/HuggingFaceM4/idefics-80b/blob/main/config.json",
+ }
+)
+
+IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["HuggingFaceM4/idefics-9b", "HuggingFaceM4/idefics-80b"])
+
+IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"openai/imagegpt-small": "", "openai/imagegpt-medium": "", "openai/imagegpt-large": ""}
+)
+
+IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["openai/imagegpt-small", "openai/imagegpt-medium", "openai/imagegpt-large"]
+)
+
+INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "huggingface/informer-tourism-monthly": "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
+ }
+)
+
+INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["huggingface/informer-tourism-monthly"])
+
+INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json"
+ }
+)
+
+INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Salesforce/instructblip-flan-t5-xl"])
+
+JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "openai/jukebox-5b-lyrics": "https://huggingface.co/openai/jukebox-5b-lyrics/blob/main/config.json",
+ "openai/jukebox-1b-lyrics": "https://huggingface.co/openai/jukebox-1b-lyrics/blob/main/config.json",
+ }
+)
+
+JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/jukebox-1b-lyrics", "openai/jukebox-5b-lyrics"])
+
+KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/kosmos-2-patch14-224": "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/config.json"
+ }
+)
+
+KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/kosmos-2-patch14-224"])
+
+LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/layoutlm-base-uncased": "https://huggingface.co/microsoft/layoutlm-base-uncased/resolve/main/config.json",
+ "microsoft/layoutlm-large-uncased": "https://huggingface.co/microsoft/layoutlm-large-uncased/resolve/main/config.json",
+ }
+)
+
+LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["microsoft/layoutlm-base-uncased", "microsoft/layoutlm-large-uncased"]
+)
+
+TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["microsoft/layoutlm-base-uncased", "microsoft/layoutlm-large-uncased"]
+)
+
+LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/config.json",
+ "layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/config.json",
+ }
+)
+
+LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["microsoft/layoutlmv2-base-uncased", "microsoft/layoutlmv2-large-uncased"]
+)
+
+LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json"}
+)
+
+LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/layoutlmv3-base", "microsoft/layoutlmv3-large"])
+
+TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["microsoft/layoutlmv3-base", "microsoft/layoutlmv3-large"]
+)
+
+LED_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/config.json"}
+)
+
+LED_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["allenai/led-base-16384"])
+
+LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json"}
+)
+
+LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/levit-128S"])
+
+LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "SCUT-DLVCLab/lilt-roberta-en-base": "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
+ }
+)
+
+LILT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["SCUT-DLVCLab/lilt-roberta-en-base"])
+
+LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({})
+
+LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"llava-hf/llava-v1.5-7b": "https://huggingface.co/llava-hf/llava-v1.5-7b/resolve/main/config.json"}
+)
+
+LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["llava-hf/llava-1.5-7b-hf", "llava-hf/llava-1.5-13b-hf", "llava-hf/bakLlava-v1-hf"]
+)
+
+LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
+ "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
+ "allenai/longformer-large-4096-finetuned-triviaqa": "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json",
+ "allenai/longformer-base-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json",
+ "allenai/longformer-large-4096-extra.pos.embd.only": "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json",
+ }
+)
+
+LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "allenai/longformer-base-4096",
+ "allenai/longformer-large-4096",
+ "allenai/longformer-large-4096-finetuned-triviaqa",
+ "allenai/longformer-base-4096-extra.pos.embd.only",
+ "allenai/longformer-large-4096-extra.pos.embd.only",
+ ]
+)
+
+TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "allenai/longformer-base-4096",
+ "allenai/longformer-large-4096",
+ "allenai/longformer-large-4096-finetuned-triviaqa",
+ "allenai/longformer-base-4096-extra.pos.embd.only",
+ "allenai/longformer-large-4096-extra.pos.embd.only",
+ ]
+)
+
+LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/long-t5-local-base": "https://huggingface.co/google/long-t5-local-base/blob/main/config.json",
+ "google/long-t5-local-large": "https://huggingface.co/google/long-t5-local-large/blob/main/config.json",
+ "google/long-t5-tglobal-base": "https://huggingface.co/google/long-t5-tglobal-base/blob/main/config.json",
+ "google/long-t5-tglobal-large": "https://huggingface.co/google/long-t5-tglobal-large/blob/main/config.json",
+ }
+)
+
+LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/long-t5-local-base",
+ "google/long-t5-local-large",
+ "google/long-t5-tglobal-base",
+ "google/long-t5-tglobal-large",
+ ]
+)
+
+LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
+ "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
+ }
+)
+
+LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["studio-ousia/luke-base", "studio-ousia/luke-large"])
+
+LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json"}
+)
+
+TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["unc-nlp/lxmert-base-uncased"])
+
+M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/config.json"}
+)
+
+M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/m2m100_418M"])
+
+MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"state-spaces/mamba-2.8b": "https://huggingface.co/state-spaces/mamba-2.8b/resolve/main/config.json"}
+)
+
+MAMBA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList([])
+
+MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
+ "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
+ }
+)
+
+MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/markuplm-base", "microsoft/markuplm-large"])
+
+MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/mask2former-swin-small-coco-instance": "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
+ }
+)
+
+MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/mask2former-swin-small-coco-instance"])
+
+MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/maskformer-swin-base-ade": "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
+ }
+)
+
+MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/maskformer-swin-base-ade"])
+
+MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"mnaylor/mega-base-wikitext": "https://huggingface.co/mnaylor/mega-base-wikitext/resolve/main/config.json"}
+)
+
+MEGA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["mnaylor/mega-base-wikitext"])
+
+MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({})
+
+MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["nvidia/megatron-bert-cased-345m"])
+
+MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json"}
+)
+
+MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["alibaba-damo/mgp-str-base"])
+
+MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "mistralai/Mistral-7B-v0.1": "https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json",
+ "mistralai/Mistral-7B-Instruct-v0.1": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/config.json",
+ }
+)
+
+MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"mistral-ai/Mixtral-8x7B": "https://huggingface.co/mistral-ai/Mixtral-8x7B/resolve/main/config.json"}
+)
+
+MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/config.json"}
+)
+
+MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/mobilebert-uncased"])
+
+TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/mobilebert-uncased"])
+
+MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
+ "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
+ }
+)
+
+MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["google/mobilenet_v1_1.0_224", "google/mobilenet_v1_0.75_192"]
+)
+
+MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
+ "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
+ "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
+ "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
+ }
+)
+
+MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/mobilenet_v2_1.4_224",
+ "google/mobilenet_v2_1.0_224",
+ "google/mobilenet_v2_0.37_160",
+ "google/mobilenet_v2_0.35_96",
+ ]
+)
+
+MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "apple/mobilevit-small": "https://huggingface.co/apple/mobilevit-small/resolve/main/config.json",
+ "apple/mobilevit-x-small": "https://huggingface.co/apple/mobilevit-x-small/resolve/main/config.json",
+ "apple/mobilevit-xx-small": "https://huggingface.co/apple/mobilevit-xx-small/resolve/main/config.json",
+ "apple/deeplabv3-mobilevit-small": "https://huggingface.co/apple/deeplabv3-mobilevit-small/resolve/main/config.json",
+ "apple/deeplabv3-mobilevit-x-small": "https://huggingface.co/apple/deeplabv3-mobilevit-x-small/resolve/main/config.json",
+ "apple/deeplabv3-mobilevit-xx-small": "https://huggingface.co/apple/deeplabv3-mobilevit-xx-small/resolve/main/config.json",
+ }
+)
+
+MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "apple/mobilevit-small",
+ "apple/mobilevit-x-small",
+ "apple/mobilevit-xx-small",
+ "apple/deeplabv3-mobilevit-small",
+ "apple/deeplabv3-mobilevit-x-small",
+ "apple/deeplabv3-mobilevit-xx-small",
+ ]
+)
+
+TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "apple/mobilevit-small",
+ "apple/mobilevit-x-small",
+ "apple/mobilevit-xx-small",
+ "apple/deeplabv3-mobilevit-small",
+ "apple/deeplabv3-mobilevit-x-small",
+ "apple/deeplabv3-mobilevit-xx-small",
+ ]
+)
+
+MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"apple/mobilevitv2-1.0": "https://huggingface.co/apple/mobilevitv2-1.0/resolve/main/config.json"}
+)
+
+MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["apple/mobilevitv2-1.0-imagenet1k-256"])
+
+MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/mpnet-base": "https://huggingface.co/microsoft/mpnet-base/resolve/main/config.json"}
+)
+
+MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/mpnet-base"])
+
+TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/mpnet-base"])
+
+MPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"mosaicml/mpt-7b": "https://huggingface.co/mosaicml/mpt-7b/resolve/main/config.json"}
+)
+
+MPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "mosaicml/mpt-7b",
+ "mosaicml/mpt-7b-storywriter",
+ "mosaicml/mpt-7b-instruct",
+ "mosaicml/mpt-7b-8k",
+ "mosaicml/mpt-7b-8k-instruct",
+ "mosaicml/mpt-7b-8k-chat",
+ "mosaicml/mpt-30b",
+ "mosaicml/mpt-30b-instruct",
+ "mosaicml/mpt-30b-chat",
+ ]
+)
+
+MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json"}
+)
+
+MRA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["uw-madison/mra-base-512-4"])
+
+MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/musicgen-small": "https://huggingface.co/facebook/musicgen-small/resolve/main/config.json"}
+)
+
+MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/musicgen-small"])
+
+MUSICGEN_MELODY_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/musicgen-melody": "https://huggingface.co/facebook/musicgen-melody/resolve/main/config.json"}
+)
+
+MUSICGEN_MELODY_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/musicgen-melody"])
+
+MVP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "RUCAIBox/mvp",
+ "RUCAIBox/mvp-data-to-text",
+ "RUCAIBox/mvp-open-dialog",
+ "RUCAIBox/mvp-question-answering",
+ "RUCAIBox/mvp-question-generation",
+ "RUCAIBox/mvp-story",
+ "RUCAIBox/mvp-summarization",
+ "RUCAIBox/mvp-task-dialog",
+ "RUCAIBox/mtl-data-to-text",
+ "RUCAIBox/mtl-multi-task",
+ "RUCAIBox/mtl-open-dialog",
+ "RUCAIBox/mtl-question-answering",
+ "RUCAIBox/mtl-question-generation",
+ "RUCAIBox/mtl-story",
+ "RUCAIBox/mtl-summarization",
+ ]
+)
+
+NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json"}
+)
+
+NAT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["shi-labs/nat-mini-in1k-224"])
+
+NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json"}
+)
+
+NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["sijunhe/nezha-cn-base", "sijunhe/nezha-cn-large", "sijunhe/nezha-base-wwm", "sijunhe/nezha-large-wwm"]
+)
+
+NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json"}
+)
+
+NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/nllb-moe-54b"])
+
+NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"uw-madison/nystromformer-512": "https://huggingface.co/uw-madison/nystromformer-512/resolve/main/config.json"}
+)
+
+NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["uw-madison/nystromformer-512"])
+
+OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "allenai/OLMo-1B-hf": "https://huggingface.co/allenai/OLMo-1B-hf/resolve/main/config.json",
+ "allenai/OLMo-7B-hf": "https://huggingface.co/allenai/OLMo-7B-hf/resolve/main/config.json",
+ "allenai/OLMo-7B-Twin-2T-hf": "https://huggingface.co/allenai/OLMo-7B-Twin-2T-hf/resolve/main/config.json",
+ }
+)
+
+ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "shi-labs/oneformer_ade20k_swin_tiny": "https://huggingface.co/shi-labs/oneformer_ade20k_swin_tiny/blob/main/config.json"
+ }
+)
+
+ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["shi-labs/oneformer_ade20k_swin_tiny"])
+
+OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"openai-community/openai-gpt": "https://huggingface.co/openai-community/openai-gpt/resolve/main/config.json"}
+)
+
+OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai-community/openai-gpt"])
+
+TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai-community/openai-gpt"])
+
+OPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "facebook/opt-125m",
+ "facebook/opt-350m",
+ "facebook/opt-1.3b",
+ "facebook/opt-2.7b",
+ "facebook/opt-6.7b",
+ "facebook/opt-13b",
+ "facebook/opt-30b",
+ ]
+)
+
+OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/owlv2-base-patch16": "https://huggingface.co/google/owlv2-base-patch16/resolve/main/config.json"}
+)
+
+OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/owlv2-base-patch16-ensemble"])
+
+OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
+ "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
+ "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
+ }
+)
+
+OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["google/owlvit-base-patch32", "google/owlvit-base-patch16", "google/owlvit-large-patch14"]
+)
+
+PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "ibm/patchtsmixer-etth1-pretrain": "https://huggingface.co/ibm/patchtsmixer-etth1-pretrain/resolve/main/config.json"
+ }
+)
+
+PATCHTSMIXER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["ibm/patchtsmixer-etth1-pretrain"])
+
+PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"ibm/patchtst-base": "https://huggingface.co/ibm/patchtst-base/resolve/main/config.json"}
+)
+
+PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["ibm/patchtst-etth1-pretrain"])
+
+PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json"}
+)
+
+PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/pegasus-x-base": "https://huggingface.co/google/pegasus-x-base/resolve/main/config.json",
+ "google/pegasus-x-large": "https://huggingface.co/google/pegasus-x-large/resolve/main/config.json",
+ }
+)
+
+PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/pegasus-x-base", "google/pegasus-x-large"])
+
+PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json"}
+)
+
+PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["deepmind/language-perceiver"])
+
+PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"adept/persimmon-8b-base": "https://huggingface.co/adept/persimmon-8b-base/resolve/main/config.json"}
+)
+
+PHI_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/phi-1": "https://huggingface.co/microsoft/phi-1/resolve/main/config.json",
+ "microsoft/phi-1_5": "https://huggingface.co/microsoft/phi-1_5/resolve/main/config.json",
+ "microsoft/phi-2": "https://huggingface.co/microsoft/phi-2/resolve/main/config.json",
+ }
+)
+
+PHI_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/phi-1", "microsoft/phi-1_5", "microsoft/phi-2"])
+
+PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/pix2struct-textcaps-base": "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
+ }
+)
+
+PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/pix2struct-textcaps-base",
+ "google/pix2struct-textcaps-large",
+ "google/pix2struct-base",
+ "google/pix2struct-large",
+ "google/pix2struct-ai2d-base",
+ "google/pix2struct-ai2d-large",
+ "google/pix2struct-widget-captioning-base",
+ "google/pix2struct-widget-captioning-large",
+ "google/pix2struct-screen2words-base",
+ "google/pix2struct-screen2words-large",
+ "google/pix2struct-docvqa-base",
+ "google/pix2struct-docvqa-large",
+ "google/pix2struct-ocrvqa-base",
+ "google/pix2struct-ocrvqa-large",
+ "google/pix2struct-chartqa-base",
+ "google/pix2struct-inforgraphics-vqa-base",
+ "google/pix2struct-inforgraphics-vqa-large",
+ ]
+)
+
+PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"uclanlp/plbart-base": "https://huggingface.co/uclanlp/plbart-base/resolve/main/config.json"}
+)
+
+PLBART_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["uclanlp/plbart-base", "uclanlp/plbart-cs-java", "uclanlp/plbart-multi_task-all"]
+)
+
+POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json"}
+)
+
+POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["sail/poolformer_s12"])
+
+POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"sweetcocoa/pop2piano": "https://huggingface.co/sweetcocoa/pop2piano/blob/main/config.json"}
+)
+
+POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["sweetcocoa/pop2piano"])
+
+PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/prophetnet-large-uncased": "https://huggingface.co/microsoft/prophetnet-large-uncased/resolve/main/config.json"
+ }
+)
+
+PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/prophetnet-large-uncased"])
+
+PVT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"Zetatech/pvt-tiny-224": "https://huggingface.co/Zetatech/pvt-tiny-224/resolve/main/config.json"}
+)
+
+PVT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Zetatech/pvt-tiny-224"])
+
+QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google-bert/bert-base-uncased": "https://huggingface.co/google-bert/bert-base-uncased/resolve/main/config.json"}
+)
+
+QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google-bert/bert-base-uncased"])
+
+QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"Qwen/Qwen2-7B-beta": "https://huggingface.co/Qwen/Qwen2-7B-beta/resolve/main/config.json"}
+)
+
+REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json",
+ "google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json",
+ "google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json",
+ "google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json",
+ "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
+ "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
+ "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
+ "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
+ }
+)
+
+REALM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/realm-cc-news-pretrained-embedder",
+ "google/realm-cc-news-pretrained-encoder",
+ "google/realm-cc-news-pretrained-scorer",
+ "google/realm-cc-news-pretrained-openqa",
+ "google/realm-orqa-nq-openqa",
+ "google/realm-orqa-nq-reader",
+ "google/realm-orqa-wq-openqa",
+ "google/realm-orqa-wq-reader",
+ ]
+)
+
+REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/reformer-crime-and-punishment": "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/config.json",
+ "google/reformer-enwik8": "https://huggingface.co/google/reformer-enwik8/resolve/main/config.json",
+ }
+)
+
+REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["google/reformer-crime-and-punishment", "google/reformer-enwik8"]
+)
+
+REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/regnet-y-040": "https://huggingface.co/facebook/regnet-y-040/blob/main/config.json"}
+)
+
+REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/regnet-y-040"])
+
+TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/regnet-y-040"])
+
+REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/rembert": "https://huggingface.co/google/rembert/resolve/main/config.json"}
+)
+
+REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/rembert"])
+
+TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/rembert"])
+
+RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json"}
+)
+
+RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/resnet-50"])
+
+TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/resnet-50"])
+
+ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "FacebookAI/roberta-base": "https://huggingface.co/FacebookAI/roberta-base/resolve/main/config.json",
+ "FacebookAI/roberta-large": "https://huggingface.co/FacebookAI/roberta-large/resolve/main/config.json",
+ "FacebookAI/roberta-large-mnli": "https://huggingface.co/FacebookAI/roberta-large-mnli/resolve/main/config.json",
+ "distilbert/distilroberta-base": "https://huggingface.co/distilbert/distilroberta-base/resolve/main/config.json",
+ "openai-community/roberta-base-openai-detector": "https://huggingface.co/openai-community/roberta-base-openai-detector/resolve/main/config.json",
+ "openai-community/roberta-large-openai-detector": "https://huggingface.co/openai-community/roberta-large-openai-detector/resolve/main/config.json",
+ }
+)
+
+ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "FacebookAI/roberta-base",
+ "FacebookAI/roberta-large",
+ "FacebookAI/roberta-large-mnli",
+ "distilbert/distilroberta-base",
+ "openai-community/roberta-base-openai-detector",
+ "openai-community/roberta-large-openai-detector",
+ ]
+)
+
+TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "FacebookAI/roberta-base",
+ "FacebookAI/roberta-large",
+ "FacebookAI/roberta-large-mnli",
+ "distilbert/distilroberta-base",
+ ]
+)
+
+ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "andreasmadsen/efficient_mlm_m0.40": "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
+ }
+)
+
+ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "andreasmadsen/efficient_mlm_m0.15",
+ "andreasmadsen/efficient_mlm_m0.20",
+ "andreasmadsen/efficient_mlm_m0.30",
+ "andreasmadsen/efficient_mlm_m0.40",
+ "andreasmadsen/efficient_mlm_m0.50",
+ "andreasmadsen/efficient_mlm_m0.60",
+ "andreasmadsen/efficient_mlm_m0.70",
+ "andreasmadsen/efficient_mlm_m0.80",
+ ]
+)
+
+TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "andreasmadsen/efficient_mlm_m0.15",
+ "andreasmadsen/efficient_mlm_m0.20",
+ "andreasmadsen/efficient_mlm_m0.30",
+ "andreasmadsen/efficient_mlm_m0.40",
+ "andreasmadsen/efficient_mlm_m0.50",
+ "andreasmadsen/efficient_mlm_m0.60",
+ "andreasmadsen/efficient_mlm_m0.70",
+ "andreasmadsen/efficient_mlm_m0.80",
+ ]
+)
+
+ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json"}
+)
+
+ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["weiweishi/roc-bert-base-zh"])
+
+ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
+ "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
+ "junnyu/roformer_chinese_char_small": "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json",
+ "junnyu/roformer_chinese_char_base": "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json",
+ "junnyu/roformer_small_discriminator": "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json",
+ "junnyu/roformer_small_generator": "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json",
+ }
+)
+
+ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "junnyu/roformer_chinese_small",
+ "junnyu/roformer_chinese_base",
+ "junnyu/roformer_chinese_char_small",
+ "junnyu/roformer_chinese_char_base",
+ "junnyu/roformer_small_discriminator",
+ "junnyu/roformer_small_generator",
+ ]
+)
+
+TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "junnyu/roformer_chinese_small",
+ "junnyu/roformer_chinese_base",
+ "junnyu/roformer_chinese_char_small",
+ "junnyu/roformer_chinese_char_base",
+ "junnyu/roformer_small_discriminator",
+ "junnyu/roformer_small_generator",
+ ]
+)
+
+RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
+ "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
+ "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
+ "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
+ "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
+ "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
+ "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
+ "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
+ "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
+ "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
+ }
+)
+
+RWKV_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "RWKV/rwkv-4-169m-pile",
+ "RWKV/rwkv-4-430m-pile",
+ "RWKV/rwkv-4-1b5-pile",
+ "RWKV/rwkv-4-3b-pile",
+ "RWKV/rwkv-4-7b-pile",
+ "RWKV/rwkv-4-14b-pile",
+ "RWKV/rwkv-raven-1b5",
+ "RWKV/rwkv-raven-3b",
+ "RWKV/rwkv-raven-7b",
+ "RWKV/rwkv-raven-14b",
+ ]
+)
+
+SAM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/sam-vit-huge": "https://huggingface.co/facebook/sam-vit-huge/resolve/main/config.json",
+ "facebook/sam-vit-large": "https://huggingface.co/facebook/sam-vit-large/resolve/main/config.json",
+ "facebook/sam-vit-base": "https://huggingface.co/facebook/sam-vit-base/resolve/main/config.json",
+ }
+)
+
+SAM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["facebook/sam-vit-huge", "facebook/sam-vit-large", "facebook/sam-vit-base"]
+)
+
+TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["facebook/sam-vit-huge", "facebook/sam-vit-large", "facebook/sam-vit-base"]
+)
+
+SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/hf-seamless-m4t-medium": "https://huggingface.co/facebook/hf-seamless-m4t-medium/resolve/main/config.json"
+ }
+)
+
+SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/hf-seamless-m4t-medium"])
+
+SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/seamless-m4t-v2-large": "https://huggingface.co/facebook/seamless-m4t-v2-large/resolve/main/config.json"
+ }
+)
+
+SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/seamless-m4t-v2-large"])
+
+SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "nvidia/segformer-b0-finetuned-ade-512-512": "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
+ }
+)
+
+SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["nvidia/segformer-b0-finetuned-ade-512-512"])
+
+TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["nvidia/segformer-b0-finetuned-ade-512-512"])
+
+SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"BAAI/seggpt-vit-large": "https://huggingface.co/BAAI/seggpt-vit-large/resolve/main/config.json"}
+)
+
+SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["BAAI/seggpt-vit-large"])
+
+SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json"}
+)
+
+SEW_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["asapp/sew-tiny-100k", "asapp/sew-small-100k", "asapp/sew-mid-100k"]
+)
+
+SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json"}
+)
+
+SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "asapp/sew-d-tiny-100k",
+ "asapp/sew-d-small-100k",
+ "asapp/sew-d-mid-100k",
+ "asapp/sew-d-mid-k127-100k",
+ "asapp/sew-d-base-100k",
+ "asapp/sew-d-base-plus-100k",
+ "asapp/sew-d-mid-400k",
+ "asapp/sew-d-mid-k127-400k",
+ "asapp/sew-d-base-plus-400k",
+ ]
+)
+
+SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/siglip-base-patch16-224": "https://huggingface.co/google/siglip-base-patch16-224/resolve/main/config.json"
+ }
+)
+
+SIGLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/siglip-base-patch16-224"])
+
+SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/s2t-small-librispeech-asr": "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
+ }
+)
+
+SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/s2t-small-librispeech-asr"])
+
+TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/s2t-small-librispeech-asr"])
+
+SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/s2t-wav2vec2-large-en-de": "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
+ }
+)
+
+SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/config.json",
+ "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/config.json",
+ "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/config.json",
+ }
+)
+
+SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/speecht5_hifigan": "https://huggingface.co/microsoft/speecht5_hifigan/resolve/main/config.json"}
+)
+
+SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["microsoft/speecht5_asr", "microsoft/speecht5_tts", "microsoft/speecht5_vc"]
+)
+
+SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "tau/splinter-base": "https://huggingface.co/tau/splinter-base/resolve/main/config.json",
+ "tau/splinter-base-qass": "https://huggingface.co/tau/splinter-base-qass/resolve/main/config.json",
+ "tau/splinter-large": "https://huggingface.co/tau/splinter-large/resolve/main/config.json",
+ "tau/splinter-large-qass": "https://huggingface.co/tau/splinter-large-qass/resolve/main/config.json",
+ }
+)
+
+SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["tau/splinter-base", "tau/splinter-base-qass", "tau/splinter-large", "tau/splinter-large-qass"]
+)
+
+SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "squeezebert/squeezebert-uncased": "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/config.json",
+ "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/config.json",
+ "squeezebert/squeezebert-mnli-headless": "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/config.json",
+ }
+)
+
+SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["squeezebert/squeezebert-uncased", "squeezebert/squeezebert-mnli", "squeezebert/squeezebert-mnli-headless"]
+)
+
+STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"stabilityai/stablelm-3b-4e1t": "https://huggingface.co/stabilityai/stablelm-3b-4e1t/resolve/main/config.json"}
+)
+
+STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict({})
+
+SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"MBZUAI/swiftformer-xs": "https://huggingface.co/MBZUAI/swiftformer-xs/resolve/main/config.json"}
+)
+
+SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["MBZUAI/swiftformer-xs"])
+
+SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/swin-tiny-patch4-window7-224": "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
+ }
+)
+
+SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/swin-tiny-patch4-window7-224"])
+
+TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/swin-tiny-patch4-window7-224"])
+
+SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "caidas/swin2sr-classicalsr-x2-64": "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
+ }
+)
+
+SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["caidas/swin2SR-classical-sr-x2-64"])
+
+SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/swinv2-tiny-patch4-window8-256": "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
+ }
+)
+
+SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/swinv2-tiny-patch4-window8-256"])
+
+SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json"}
+)
+
+SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/switch-base-8",
+ "google/switch-base-16",
+ "google/switch-base-32",
+ "google/switch-base-64",
+ "google/switch-base-128",
+ "google/switch-base-256",
+ "google/switch-large-128",
+ "google/switch-xxl-128",
+ "google/switch-c-2048",
+ ]
+)
+
+T5_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google-t5/t5-small": "https://huggingface.co/google-t5/t5-small/resolve/main/config.json",
+ "google-t5/t5-base": "https://huggingface.co/google-t5/t5-base/resolve/main/config.json",
+ "google-t5/t5-large": "https://huggingface.co/google-t5/t5-large/resolve/main/config.json",
+ "google-t5/t5-3b": "https://huggingface.co/google-t5/t5-3b/resolve/main/config.json",
+ "google-t5/t5-11b": "https://huggingface.co/google-t5/t5-11b/resolve/main/config.json",
+ }
+)
+
+T5_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["google-t5/t5-small", "google-t5/t5-base", "google-t5/t5-large", "google-t5/t5-3b", "google-t5/t5-11b"]
+)
+
+TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["google-t5/t5-small", "google-t5/t5-base", "google-t5/t5-large", "google-t5/t5-3b", "google-t5/t5-11b"]
+)
+
+TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/table-transformer-detection": "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
+ }
+)
+
+TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/table-transformer-detection"])
+
+TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/tapas-base-finetuned-sqa": "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json",
+ "google/tapas-base-finetuned-wtq": "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json",
+ "google/tapas-base-finetuned-wikisql-supervised": "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json",
+ "google/tapas-base-finetuned-tabfact": "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json",
+ }
+)
+
+TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/tapas-large",
+ "google/tapas-large-finetuned-sqa",
+ "google/tapas-large-finetuned-wtq",
+ "google/tapas-large-finetuned-wikisql-supervised",
+ "google/tapas-large-finetuned-tabfact",
+ "google/tapas-base",
+ "google/tapas-base-finetuned-sqa",
+ "google/tapas-base-finetuned-wtq",
+ "google/tapas-base-finetuned-wikisql-supervised",
+ "google/tapas-base-finetuned-tabfact",
+ "google/tapas-small",
+ "google/tapas-small-finetuned-sqa",
+ "google/tapas-small-finetuned-wtq",
+ "google/tapas-small-finetuned-wikisql-supervised",
+ "google/tapas-small-finetuned-tabfact",
+ "google/tapas-mini",
+ "google/tapas-mini-finetuned-sqa",
+ "google/tapas-mini-finetuned-wtq",
+ "google/tapas-mini-finetuned-wikisql-supervised",
+ "google/tapas-mini-finetuned-tabfact",
+ "google/tapas-tiny",
+ "google/tapas-tiny-finetuned-sqa",
+ "google/tapas-tiny-finetuned-wtq",
+ "google/tapas-tiny-finetuned-wikisql-supervised",
+ "google/tapas-tiny-finetuned-tabfact",
+ ]
+)
+
+TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "google/tapas-large",
+ "google/tapas-large-finetuned-sqa",
+ "google/tapas-large-finetuned-wtq",
+ "google/tapas-large-finetuned-wikisql-supervised",
+ "google/tapas-large-finetuned-tabfact",
+ "google/tapas-base",
+ "google/tapas-base-finetuned-sqa",
+ "google/tapas-base-finetuned-wtq",
+ "google/tapas-base-finetuned-wikisql-supervised",
+ "google/tapas-base-finetuned-tabfact",
+ "google/tapas-small",
+ "google/tapas-small-finetuned-sqa",
+ "google/tapas-small-finetuned-wtq",
+ "google/tapas-small-finetuned-wikisql-supervised",
+ "google/tapas-small-finetuned-tabfact",
+ "google/tapas-mini",
+ "google/tapas-mini-finetuned-sqa",
+ "google/tapas-mini-finetuned-wtq",
+ "google/tapas-mini-finetuned-wikisql-supervised",
+ "google/tapas-mini-finetuned-tabfact",
+ "google/tapas-tiny",
+ "google/tapas-tiny-finetuned-sqa",
+ "google/tapas-tiny-finetuned-wtq",
+ "google/tapas-tiny-finetuned-wikisql-supervised",
+ "google/tapas-tiny-finetuned-tabfact",
+ ]
+)
+
+TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "huggingface/time-series-transformer-tourism-monthly": "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
+ }
+)
+
+TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["huggingface/time-series-transformer-tourism-monthly"]
+)
+
+TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json"}
+)
+
+TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/timesformer-base-finetuned-k400"])
+
+TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/trocr-base-handwritten": "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
+ }
+)
+
+TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/trocr-base-handwritten"])
+
+TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"ZinengTang/tvlt-base": "https://huggingface.co/ZinengTang/tvlt-base/blob/main/config.json"}
+)
+
+TVLT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["ZinengTang/tvlt-base"])
+
+TVP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"Intel/tvp-base": "https://huggingface.co/Intel/tvp-base/resolve/main/config.json"}
+)
+
+TVP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["Intel/tvp-base", "Intel/tvp-base-ANet"])
+
+UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/udop-large": "https://huggingface.co/microsoft/udop-large/resolve/main/config.json"}
+)
+
+UDOP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/udop-large"])
+
+UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/unispeech-large-1500h-cv": "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
+ }
+)
+
+UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["microsoft/unispeech-large-1500h-cv", "microsoft/unispeech-large-multi-lingual-1500h-cv"]
+)
+
+UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/unispeech-sat-base-100h-libri-ft": "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
+ }
+)
+
+UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList([])
+
+UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"dg845/univnet-dev": "https://huggingface.co/dg845/univnet-dev/resolve/main/config.json"}
+)
+
+UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["dg845/univnet-dev"])
+
+VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"MCG-NJU/videomae-base": "https://huggingface.co/MCG-NJU/videomae-base/resolve/main/config.json"}
+)
+
+VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["MCG-NJU/videomae-base"])
+
+VILT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"dandelin/vilt-b32-mlm": "https://huggingface.co/dandelin/vilt-b32-mlm/blob/main/config.json"}
+)
+
+VILT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["dandelin/vilt-b32-mlm"])
+
+VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"ybelkada/vip-llava-7b-hf": "https://huggingface.co/llava-hf/vip-llava-7b-hf/resolve/main/config.json"}
+)
+
+VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["llava-hf/vip-llava-7b-hf"])
+
+VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
+ "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
+ "uclanlp/visualbert-vqa-coco-pre": "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json",
+ "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
+ "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
+ "uclanlp/visualbert-vcr-coco-pre": "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json",
+ "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
+ "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
+ "uclanlp/visualbert-nlvr2-coco-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json",
+ }
+)
+
+VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "uclanlp/visualbert-vqa",
+ "uclanlp/visualbert-vqa-pre",
+ "uclanlp/visualbert-vqa-coco-pre",
+ "uclanlp/visualbert-vcr",
+ "uclanlp/visualbert-vcr-pre",
+ "uclanlp/visualbert-vcr-coco-pre",
+ "uclanlp/visualbert-nlvr2",
+ "uclanlp/visualbert-nlvr2-pre",
+ "uclanlp/visualbert-nlvr2-coco-pre",
+ ]
+)
+
+VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json"}
+)
+
+VIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/vit-base-patch16-224"])
+
+VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"google/vit-hybrid-base-bit-384": "https://huggingface.co/vit-hybrid-base-bit-384/resolve/main/config.json"}
+)
+
+VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/vit-hybrid-base-bit-384"])
+
+VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json"}
+)
+
+VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/vit-mae-base"])
+
+VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json"}
+)
+
+VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/vit-msn-small"])
+
+VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/vit-det-base": "https://huggingface.co/facebook/vit-det-base/resolve/main/config.json"}
+)
+
+VITDET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/vit-det-base"])
+
+VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "hustvl/vitmatte-small-composition-1k": "https://huggingface.co/hustvl/vitmatte-small-composition-1k/resolve/main/config.json"
+ }
+)
+
+VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["hustvl/vitmatte-small-composition-1k"])
+
+VITS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/mms-tts-eng": "https://huggingface.co/facebook/mms-tts-eng/resolve/main/config.json"}
+)
+
+VITS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/mms-tts-eng"])
+
+VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "google/vivit-b-16x2-kinetics400": "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
+ }
+)
+
+VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["google/vivit-b-16x2-kinetics400"])
+
+WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json"}
+)
+
+WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "facebook/wav2vec2-base-960h",
+ "facebook/wav2vec2-large-960h",
+ "facebook/wav2vec2-large-960h-lv60",
+ "facebook/wav2vec2-large-960h-lv60-self",
+ ]
+)
+
+TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "facebook/wav2vec2-base-960h",
+ "facebook/wav2vec2-large-960h",
+ "facebook/wav2vec2-large-960h-lv60",
+ "facebook/wav2vec2-large-960h-lv60-self",
+ ]
+)
+
+WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/w2v-bert-2.0": "https://huggingface.co/facebook/w2v-bert-2.0/resolve/main/config.json"}
+)
+
+WAV2VEC2_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/w2v-bert-2.0"])
+
+WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/wav2vec2-conformer-rel-pos-large": "https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large/resolve/main/config.json"
+ }
+)
+
+WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/wav2vec2-conformer-rel-pos-large"])
+
+WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json"}
+)
+
+WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["microsoft/wavlm-base", "microsoft/wavlm-base-plus", "microsoft/wavlm-large"]
+)
+
+WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json"}
+)
+
+WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/whisper-base"])
+
+TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["openai/whisper-base"])
+
+XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"microsoft/xclip-base-patch32": "https://huggingface.co/microsoft/xclip-base-patch32/resolve/main/config.json"}
+)
+
+XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/xclip-base-patch32"])
+
+XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json"}
+)
+
+XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/xglm-564M"])
+
+TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/xglm-564M"])
+
+XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "FacebookAI/xlm-mlm-en-2048": "https://huggingface.co/FacebookAI/xlm-mlm-en-2048/resolve/main/config.json",
+ "FacebookAI/xlm-mlm-ende-1024": "https://huggingface.co/FacebookAI/xlm-mlm-ende-1024/resolve/main/config.json",
+ "FacebookAI/xlm-mlm-enfr-1024": "https://huggingface.co/FacebookAI/xlm-mlm-enfr-1024/resolve/main/config.json",
+ "FacebookAI/xlm-mlm-enro-1024": "https://huggingface.co/FacebookAI/xlm-mlm-enro-1024/resolve/main/config.json",
+ "FacebookAI/xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/FacebookAI/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
+ "FacebookAI/xlm-mlm-xnli15-1024": "https://huggingface.co/FacebookAI/xlm-mlm-xnli15-1024/resolve/main/config.json",
+ "FacebookAI/xlm-clm-enfr-1024": "https://huggingface.co/FacebookAI/xlm-clm-enfr-1024/resolve/main/config.json",
+ "FacebookAI/xlm-clm-ende-1024": "https://huggingface.co/FacebookAI/xlm-clm-ende-1024/resolve/main/config.json",
+ "FacebookAI/xlm-mlm-17-1280": "https://huggingface.co/FacebookAI/xlm-mlm-17-1280/resolve/main/config.json",
+ "FacebookAI/xlm-mlm-100-1280": "https://huggingface.co/FacebookAI/xlm-mlm-100-1280/resolve/main/config.json",
+ }
+)
+
+XLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "FacebookAI/xlm-mlm-en-2048",
+ "FacebookAI/xlm-mlm-ende-1024",
+ "FacebookAI/xlm-mlm-enfr-1024",
+ "FacebookAI/xlm-mlm-enro-1024",
+ "FacebookAI/xlm-mlm-tlm-xnli15-1024",
+ "FacebookAI/xlm-mlm-xnli15-1024",
+ "FacebookAI/xlm-clm-enfr-1024",
+ "FacebookAI/xlm-clm-ende-1024",
+ "FacebookAI/xlm-mlm-17-1280",
+ "FacebookAI/xlm-mlm-100-1280",
+ ]
+)
+
+TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "FacebookAI/xlm-mlm-en-2048",
+ "FacebookAI/xlm-mlm-ende-1024",
+ "FacebookAI/xlm-mlm-enfr-1024",
+ "FacebookAI/xlm-mlm-enro-1024",
+ "FacebookAI/xlm-mlm-tlm-xnli15-1024",
+ "FacebookAI/xlm-mlm-xnli15-1024",
+ "FacebookAI/xlm-clm-enfr-1024",
+ "FacebookAI/xlm-clm-ende-1024",
+ "FacebookAI/xlm-mlm-17-1280",
+ "FacebookAI/xlm-mlm-100-1280",
+ ]
+)
+
+XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "microsoft/xprophetnet-large-wiki100-cased": "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
+ }
+)
+
+XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["microsoft/xprophetnet-large-wiki100-cased"])
+
+XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "FacebookAI/xlm-roberta-base": "https://huggingface.co/FacebookAI/xlm-roberta-base/resolve/main/config.json",
+ "FacebookAI/xlm-roberta-large": "https://huggingface.co/FacebookAI/xlm-roberta-large/resolve/main/config.json",
+ "FacebookAI/xlm-roberta-large-finetuned-conll02-dutch": "https://huggingface.co/FacebookAI/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json",
+ "FacebookAI/xlm-roberta-large-finetuned-conll02-spanish": "https://huggingface.co/FacebookAI/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json",
+ "FacebookAI/xlm-roberta-large-finetuned-conll03-english": "https://huggingface.co/FacebookAI/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json",
+ "FacebookAI/xlm-roberta-large-finetuned-conll03-german": "https://huggingface.co/FacebookAI/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json",
+ }
+)
+
+XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "FacebookAI/xlm-roberta-base",
+ "FacebookAI/xlm-roberta-large",
+ "FacebookAI/xlm-roberta-large-finetuned-conll02-dutch",
+ "FacebookAI/xlm-roberta-large-finetuned-conll02-spanish",
+ "FacebookAI/xlm-roberta-large-finetuned-conll03-english",
+ "FacebookAI/xlm-roberta-large-finetuned-conll03-german",
+ ]
+)
+
+TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "FacebookAI/xlm-roberta-base",
+ "FacebookAI/xlm-roberta-large",
+ "joeddav/xlm-roberta-large-xnli",
+ "cardiffnlp/twitter-xlm-roberta-base-sentiment",
+ ]
+)
+
+FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ ["FacebookAI/xlm-roberta-base", "FacebookAI/xlm-roberta-large"]
+)
+
+XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
+ "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
+ }
+)
+
+XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["facebook/xlm-roberta-xl", "facebook/xlm-roberta-xxl"])
+
+XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "xlnet/xlnet-base-cased": "https://huggingface.co/xlnet/xlnet-base-cased/resolve/main/config.json",
+ "xlnet/xlnet-large-cased": "https://huggingface.co/xlnet/xlnet-large-cased/resolve/main/config.json",
+ }
+)
+
+XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["xlnet/xlnet-base-cased", "xlnet/xlnet-large-cased"])
+
+TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["xlnet/xlnet-base-cased", "xlnet/xlnet-large-cased"])
+
+XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {
+ "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
+ "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
+ "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
+ "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
+ "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
+ "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
+ "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
+ "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
+ "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
+ }
+)
+
+XMOD_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(
+ [
+ "facebook/xmod-base",
+ "facebook/xmod-large-prenorm",
+ "facebook/xmod-base-13-125k",
+ "facebook/xmod-base-30-125k",
+ "facebook/xmod-base-30-195k",
+ "facebook/xmod-base-60-125k",
+ "facebook/xmod-base-60-265k",
+ "facebook/xmod-base-75-125k",
+ "facebook/xmod-base-75-269k",
+ ]
+)
+
+YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json"}
+)
+
+YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["hustvl/yolos-small"])
+
+YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP = DeprecatedDict(
+ {"uw-madison/yoso-4096": "https://huggingface.co/uw-madison/yoso-4096/resolve/main/config.json"}
+)
+
+YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = DeprecatedList(["uw-madison/yoso-4096"])
+
+
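+# Maps each model type string to the name of the corresponding deprecated
+# *_PRETRAINED_CONFIG_ARCHIVE_MAP constant defined in this module.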
+CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict(
+ [
+ # Add archive maps here
+ ("albert", "ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("align", "ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("altclip", "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("audio-spectrogram-transformer", "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("autoformer", "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("bark", "BARK_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("bart", "BART_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("beit", "BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("bert", "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("big_bird", "BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("bigbird_pegasus", "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("biogpt", "BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("bit", "BIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("blenderbot", "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("blenderbot-small", "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("blip", "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("blip-2", "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("bloom", "BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("bridgetower", "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("bros", "BROS_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("camembert", "CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("canine", "CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("chinese_clip", "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("clap", "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST"),
+ ("clip", "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("clipseg", "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("clvp", "CLVP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("codegen", "CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("conditional_detr", "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("convbert", "CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("convnext", "CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("convnextv2", "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("cpmant", "CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("ctrl", "CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("cvt", "CVT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("data2vec-audio", "DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("data2vec-text", "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("data2vec-vision", "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("deberta", "DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("deberta-v2", "DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("deformable_detr", "DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("deit", "DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("depth_anything", "DEPTH_ANYTHING_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("deta", "DETA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("detr", "DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("dinat", "DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("dinov2", "DINOV2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("distilbert", "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("donut-swin", "DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("dpr", "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("dpt", "DPT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("efficientformer", "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("efficientnet", "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("electra", "ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("encodec", "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("ernie", "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("ernie_m", "ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("esm", "ESM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("falcon", "FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("fastspeech2_conformer", "FASTSPEECH2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("flaubert", "FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("flava", "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("fnet", "FNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("focalnet", "FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("fsmt", "FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("funnel", "FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("fuyu", "FUYU_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("gemma", "GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("git", "GIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("glpn", "GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("gpt2", "GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("gpt_bigcode", "GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("gpt_neo", "GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("gpt_neox", "GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("gpt_neox_japanese", "GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("gptj", "GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("gptsan-japanese", "GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("graphormer", "GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("groupvit", "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("hubert", "HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("ibert", "IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("idefics", "IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("imagegpt", "IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("informer", "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("instructblip", "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("jukebox", "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("kosmos-2", "KOSMOS2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("layoutlm", "LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("layoutlmv2", "LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("layoutlmv3", "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("led", "LED_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("levit", "LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("lilt", "LILT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("llama", "LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("llava", "LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("longformer", "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("longt5", "LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("luke", "LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("lxmert", "LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("m2m_100", "M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mamba", "MAMBA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("markuplm", "MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mask2former", "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("maskformer", "MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mbart", "MBART_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mctct", "MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mega", "MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("megatron-bert", "MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mgp-str", "MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mistral", "MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mixtral", "MIXTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mobilenet_v1", "MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mobilenet_v2", "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mobilevit", "MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mobilevitv2", "MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mpnet", "MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mpt", "MPT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mra", "MRA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("musicgen", "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("mvp", "MVP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("nat", "NAT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("nezha", "NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("nllb-moe", "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("nystromformer", "NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("oneformer", "ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("olmo", "OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("open-llama", "OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("openai-gpt", "OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("opt", "OPT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("owlv2", "OWLV2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("owlvit", "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("patchtsmixer", "PATCHTSMIXER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("patchtst", "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("pegasus", "PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("pegasus_x", "PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("perceiver", "PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("persimmon", "PERSIMMON_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("phi", "PHI_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("pix2struct", "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("plbart", "PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("poolformer", "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("pop2piano", "POP2PIANO_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("prophetnet", "PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("pvt", "PVT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("qdqbert", "QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("qwen2", "QWEN2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("realm", "REALM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("regnet", "REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("rembert", "REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("resnet", "RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("retribert", "RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("roberta", "ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("roberta-prelayernorm", "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("roc_bert", "ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("roformer", "ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("rwkv", "RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("sam", "SAM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("seamless_m4t", "SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("seamless_m4t_v2", "SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("segformer", "SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("seggpt", "SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("sew", "SEW_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("sew-d", "SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("siglip", "SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("speech_to_text", "SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("speech_to_text_2", "SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("speecht5", "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("splinter", "SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("squeezebert", "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("stablelm", "STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("starcoder2", "STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("swiftformer", "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("swin", "SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("swin2sr", "SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("swinv2", "SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("switch_transformers", "SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("t5", "T5_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("table-transformer", "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("tapas", "TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("time_series_transformer", "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("timesformer", "TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("transfo-xl", "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("tvlt", "TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("tvp", "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("udop", "UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("unispeech", "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("unispeech-sat", "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("univnet", "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("van", "VAN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("videomae", "VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vilt", "VILT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vipllava", "VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("visual_bert", "VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vit", "VIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vit_hybrid", "VIT_HYBRID_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vit_mae", "VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vit_msn", "VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vitdet", "VITDET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vitmatte", "VITMATTE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vits", "VITS_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("vivit", "VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("wav2vec2", "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("wav2vec2-bert", "WAV2VEC2_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("wav2vec2-conformer", "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("whisper", "WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("xclip", "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("xglm", "XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("xlm", "XLM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("xlm-prophetnet", "XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("xlm-roberta", "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("xlnet", "XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("xmod", "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("yolos", "YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ("yoso", "YOSO_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+ ]
+)
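Each tuple above pairs a model type string with the name of its deprecated config archive map. A minimal sketch of how such an entry could be resolved at runtime, assuming the tuples above belong to a module-level OrderedDict (the helper name below is illustrative and not part of the file):

import importlib
from collections import OrderedDict


def resolve_config_archive_map(mapping: OrderedDict, model_type: str):
    # Look up the archive-map attribute name for the given model type, then fetch the
    # corresponding object from the deprecated archive-maps module; None if either lookup fails.
    archive_module = importlib.import_module("transformers.models.deprecated._archive_maps")
    attr_name = mapping.get(model_type)
    return getattr(archive_module, attr_name, None) if attr_name else None

For example, resolving "gpt2" this way would be expected to return the `GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP` object defined earlier in the file.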
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..567be97b7cd8631e71367e713dc2f0ef23bd76f5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__init__.py
@@ -0,0 +1,56 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
+ "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
+ "processing_mctct": ["MCTCTProcessor"],
+}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mctct"] = [
+ "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MCTCTForCTC",
+ "MCTCTModel",
+ "MCTCTPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
+ from .feature_extraction_mctct import MCTCTFeatureExtractor
+ from .processing_mctct import MCTCTProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
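The `_LazyModule` registration above defers the submodule imports until an attribute is first accessed, and the torch-dependent modeling classes are only registered when `is_torch_available()` is true. A rough sketch of what this means for a caller, assuming an installed `transformers` with this layout (availability of the modeling classes depends on whether torch is present):

# Config and feature-extractor classes never require torch.
from transformers.models.deprecated.mctct import MCTCTConfig, MCTCTFeatureExtractor

config = MCTCTConfig()

# Modeling classes are only resolvable when torch is installed; otherwise the lazy
# module exposes no such attribute and the import fails.
try:
    from transformers.models.deprecated.mctct import MCTCTForCTC
except ImportError:
    MCTCTForCTC = None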
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d449475a45b57a0cce1edf244693df6427eb46e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74c5756cbfbcff94425ca42048f265ef43df0f51
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/configuration_mctct.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c7ecbac52f28dc0c2263460c0284a23048aabe3
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/feature_extraction_mctct.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c235405216670e95e843fe5d3ea1bdff4c4298d9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/modeling_mctct.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fabb6185ec29424b0aaa185e069bd4006083ce5a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/__pycache__/processing_mctct.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py
new file mode 100644
index 0000000000000000000000000000000000000000..6546b18eab0522ce8c618da7bf3a8baab005dd0d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/configuration_mctct.py
@@ -0,0 +1,184 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""M-CTC-T model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from .._archive_maps import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class MCTCTConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an
+ M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the M-CTC-T
+ [speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 8065):
+ Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`MCTCTModel`].
+ hidden_size (`int`, *optional*, defaults to 1536):
+ Dimension of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 36):
+ Number of hidden layers in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 6144):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ attention_head_dim (`int`, *optional*, defaults to 384):
+ Dimensions of each attention head for each attention layer in the Transformer encoder.
+ max_position_embeddings (`int`, *optional*, defaults to 920):
+ The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction).
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ layerdrop (`float`, *optional*, defaults to 0.3):
+ The probability of dropping an encoder layer during training. The default 0.3 value is used in the original
+ implementation.
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.3):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.3):
+ The dropout ratio for the attention probabilities.
+ pad_token_id (`int`, *optional*, defaults to 1):
+ The tokenizer index of the pad token.
+ bos_token_id (`int`, *optional*, defaults to 0):
+ The tokenizer index of the bos token.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ The tokenizer index of the eos token.
+ conv_glu_dim (`int`, *optional*, defaults to 1):
+            The dimension of the `Conv1dSubsampler` output on which GLU is applied. Though the original Flashlight
+            code uses the value of 2, it is adapted to 1 here due to transposition differences.
+        conv_dropout (`float`, *optional*, defaults to 0.3):
+ The probability of randomly dropping the `Conv1dSubsampler` layer during training.
+ num_conv_layers (`int`, *optional*, defaults to 1):
+ Number of convolution layers before applying transformer encoder layers.
+ conv_kernel (`Sequence[int]`, *optional*, defaults to `(7,)`):
+ The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal
+ to `num_conv_layers`.
+ conv_stride (`Sequence[int]`, *optional*, defaults to `(3,)`):
+ The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be equal
+ to `num_conv_layers`.
+ input_feat_per_channel (`int`, *optional*, defaults to 80):
+ Feature dimensions of the channels of the input to the Conv1D layer.
+ input_channels (`int`, *optional*, defaults to 1):
+ Number of input channels of the input to the Conv1D layer.
+ conv_channels (`List[int]`, *optional*):
+ Channel sizes of intermediate Conv1D layers.
+ ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
+ Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
+ instance of [`MCTCTForCTC`].
+ ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
+ Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
+ occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
+ of [`MCTCTForCTC`].
+
+ Example:
+
+ ```python
+ >>> from transformers import MCTCTConfig, MCTCTModel
+
+ >>> # Initializing a M-CTC-T mctct-large style configuration
+ >>> configuration = MCTCTConfig()
+
+ >>> # Initializing a model (with random weights) from the mctct-large style configuration
+ >>> model = MCTCTModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "mctct"
+
+ def __init__(
+ self,
+ vocab_size=8065,
+ hidden_size=1536,
+ num_hidden_layers=36,
+ intermediate_size=6144,
+ num_attention_heads=4,
+ attention_head_dim=384,
+ max_position_embeddings=920,
+ layer_norm_eps=1e-5,
+ layerdrop=0.3,
+ hidden_act="relu",
+ initializer_range=0.02,
+ hidden_dropout_prob=0.3,
+ attention_probs_dropout_prob=0.3,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ conv_glu_dim=1,
+ conv_dropout=0.3,
+ num_conv_layers=1,
+ conv_kernel=(7,),
+ conv_stride=(3,),
+ input_feat_per_channel=80,
+ input_channels=1,
+ conv_channels=None,
+ ctc_loss_reduction="sum",
+ ctc_zero_infinity=False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.intermediate_size = intermediate_size
+ self.num_attention_heads = num_attention_heads
+ self.attention_head_dim = attention_head_dim
+ self.max_position_embeddings = max_position_embeddings
+ self.layer_norm_eps = layer_norm_eps
+ self.layerdrop = layerdrop
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.pad_token_id = pad_token_id
+ self.bos_token_id = bos_token_id
+ self.eos_token_id = eos_token_id
+ self.conv_glu_dim = conv_glu_dim
+ self.conv_dropout = conv_dropout
+ self.num_conv_layers = num_conv_layers
+ self.input_feat_per_channel = input_feat_per_channel
+ self.input_channels = input_channels
+ self.conv_channels = conv_channels
+ self.ctc_loss_reduction = ctc_loss_reduction
+ self.ctc_zero_infinity = ctc_zero_infinity
+
+ # prevents config testing fail with exporting to json
+ self.conv_kernel = list(conv_kernel)
+ self.conv_stride = list(conv_stride)
+
+ if len(self.conv_kernel) != self.num_conv_layers:
+ raise ValueError(
+ "Configuration for convolutional module is incorrect. "
+ "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
+ f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
+ f"`config.num_conv_layers = {self.num_conv_layers}`."
+ )
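The constructor above requires `len(conv_kernel)` to match `num_conv_layers`. A small illustrative sketch of that validation, with values chosen only for demonstration:

from transformers.models.deprecated.mctct import MCTCTConfig

# Consistent settings: one convolution layer, one kernel size, one stride.
config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,))

# Inconsistent settings: two layers declared but only one kernel size given.
try:
    MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))
except ValueError as err:
    print(err)  # "Configuration for convolutional module is incorrect. ..."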
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1e17c4b12f91dc25284e30a70388137e52ab82b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/feature_extraction_mctct.py
@@ -0,0 +1,288 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Feature extractor class for M-CTC-T
+"""
+
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
+from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ....feature_extraction_utils import BatchFeature
+from ....file_utils import PaddingStrategy, TensorType
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MCTCTFeatureExtractor(SequenceFeatureExtractor):
+ r"""
+    Constructs an M-CTC-T feature extractor.
+
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
+ most of the main methods. Users should refer to this superclass for more information regarding those methods. This
+ code has been adapted from Flashlight's C++ code. For more information about the implementation, one can refer to
+ this [notebook](https://colab.research.google.com/drive/1GLtINkkhzms-IsdcGy_-tVCkv0qNF-Gt#scrollTo=pMCRGMmUC_an)
+    that walks the user through the implementation step by step.
+
+ Args:
+ feature_size (`int`, defaults to 80):
+            The feature dimension of the extracted features. This is the number of mel-frequency bins.
+ sampling_rate (`int`, defaults to 16000):
+            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
+ padding_value (`float`, defaults to 0.0):
+ The value that is used to fill the padding values.
+ hop_length (`int`, defaults to 10):
+            Number of milliseconds between successive analysis windows. Otherwise referred to as "shift" in many papers.
+        win_length (`int`, defaults to 25):
+            Length of each analysis window in milliseconds.
+ win_function (`str`, defaults to `"hamming_window"`):
+ Name for the window function used for windowing, must be accessible via `torch.{win_function}`
+ frame_signal_scale (`float`, defaults to 32768.0):
+ Constant multiplied in creating the frames before applying DFT.
+ preemphasis_coeff (`float`, defaults to 0.97):
+ Constant multiplied in applying Pre-emphasis before DFT.
+        mel_floor (`float`, defaults to 1.0):
+ Minimum value of mel frequency banks.
+ normalize_means (`bool`, *optional*, defaults to `True`):
+ Whether or not to zero-mean normalize the extracted features.
+ normalize_vars (`bool`, *optional*, defaults to `True`):
+ Whether or not to unit-variance normalize the extracted features.
+ """
+
+ model_input_names = ["input_features", "attention_mask"]
+
+ def __init__(
+ self,
+ feature_size=80,
+ sampling_rate=16000,
+ padding_value=0.0,
+ hop_length=10,
+ win_length=25,
+ win_function="hamming_window",
+ frame_signal_scale=32768.0,
+ preemphasis_coeff=0.97,
+ mel_floor=1.0,
+ normalize_means=True,
+ normalize_vars=True,
+ return_attention_mask=False,
+ **kwargs,
+ ):
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
+
+ self.feature_size = feature_size
+ self.sampling_rate = sampling_rate
+ self.padding_value = padding_value
+ self.hop_length = hop_length
+ self.win_length = win_length
+ self.frame_signal_scale = frame_signal_scale
+ self.preemphasis_coeff = preemphasis_coeff
+ self.mel_floor = mel_floor
+ self.normalize_means = normalize_means
+ self.normalize_vars = normalize_vars
+ self.win_function = win_function
+ self.return_attention_mask = return_attention_mask
+
+ self.sample_size = win_length * sampling_rate // 1000
+ self.sample_stride = hop_length * sampling_rate // 1000
+
+ self.n_fft = optimal_fft_length(self.sample_size)
+ self.n_freqs = (self.n_fft // 2) + 1
+
+    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
+ """
+ Extracts MFSC Features for one waveform vector (unbatched). Adapted from Flashlight's C++ MFSC code.
+ """
+ if self.win_function == "hamming_window":
+ window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
+ else:
+ window = window_function(window_length=self.sample_size, name=self.win_function)
+
+ fbanks = mel_filter_bank(
+ num_frequency_bins=self.n_freqs,
+ num_mel_filters=self.feature_size,
+ min_frequency=0.0,
+ max_frequency=self.sampling_rate / 2.0,
+ sampling_rate=self.sampling_rate,
+ )
+
+ msfc_features = spectrogram(
+ one_waveform * self.frame_signal_scale,
+ window=window,
+ frame_length=self.sample_size,
+ hop_length=self.sample_stride,
+ fft_length=self.n_fft,
+ center=False,
+ preemphasis=self.preemphasis_coeff,
+ mel_filters=fbanks,
+ mel_floor=self.mel_floor,
+ log_mel="log",
+ )
+ return msfc_features.T
+
+ def _normalize_one(self, x, input_length, padding_value):
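+        # Per-utterance mean/variance normalization, computed over the unpadded frames only.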
+ # make sure we normalize float32 arrays
+ if self.normalize_means:
+ mean = x[:input_length].mean(axis=0)
+ x = np.subtract(x, mean)
+ if self.normalize_vars:
+ std = x[:input_length].std(axis=0)
+ x = np.divide(x, std)
+
+ if input_length < x.shape[0]:
+ x[input_length:] = padding_value
+
+ # make sure array is in float32
+ x = x.astype(np.float32)
+
+ return x
+
+ def normalize(
+ self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
+ ) -> List[np.ndarray]:
+ lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
+ return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
+
+ def __call__(
+ self,
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+ padding: Union[bool, str, PaddingStrategy] = False,
+ max_length: Optional[int] = None,
+ truncation: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ sampling_rate: Optional[int] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+        Main method to featurize and prepare one or several sequence(s) for the model. It returns the log-mel
+        spectrogram of the input audio, as implemented in the original Flashlight MFSC feature extraction code.
+
+ Args:
+ raw_speech (`torch.Tensor`, `np.ndarray`, `List[float]`, `List[torch.Tensor]`, `List[np.ndarray]`, `List[List[float]]`):
+ The sequence or batch of sequences to be padded. Each sequence can be a tensor, a numpy array, a list
+ of float values, a list of tensors, a list of numpy arrays or a list of list of float values. Must be
+ mono channel audio, not stereo, i.e. single float per timestep.
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+              sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ truncation (`bool`):
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific feature_extractor's default.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ sampling_rate (`int`, *optional*):
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+ `sampling_rate` at the forward call to prevent silent errors.
+            padding_value (`float`, defaults to 0.0):
+                The value that is used to fill the padding values.
+ """
+
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
+ f" {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+ if is_batched_numpy and len(raw_speech.shape) > 2:
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+ is_batched = is_batched_numpy or (
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+ )
+
+ if is_batched:
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+ raw_speech = raw_speech.astype(np.float32)
+
+ # always return batch
+ if not is_batched:
+ raw_speech = [raw_speech]
+
+ # extract fbank features
+ features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
+
+ # convert into correct format for padding
+ encoded_inputs = BatchFeature({"input_features": features})
+
+ padded_inputs = self.pad(
+ encoded_inputs,
+ padding=padding,
+ max_length=max_length,
+ truncation=truncation,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=True,
+ **kwargs,
+ )
+ # make sure list is in array format
+ input_features = padded_inputs.get("input_features")
+ if isinstance(input_features[0], list):
+ padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
+
+ attention_mask = padded_inputs.get("attention_mask")
+ if attention_mask is not None:
+ padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
+
+ if self.normalize_means or self.normalize_vars:
+ attention_mask = (
+ np.array(attention_mask, dtype=np.int32)
+ if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
+ and padding
+ else None
+ )
+ padded_inputs["input_features"] = self.normalize(
+ padded_inputs["input_features"], attention_mask=attention_mask
+ )
+
+ if return_tensors is not None:
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
+
+ return padded_inputs
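A rough usage sketch of the extractor defined above (the waveform is synthetic; with the defaults it expects mono 16 kHz audio and produces 80-dimensional log-mel features):

import numpy as np

from transformers.models.deprecated.mctct import MCTCTFeatureExtractor

extractor = MCTCTFeatureExtractor()  # feature_size=80, sampling_rate=16000 by default

# One second of synthetic mono audio in place of a real recording.
waveform = np.random.randn(16000).astype(np.float32)

inputs = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
print(inputs["input_features"].shape)  # (1, num_frames, 80)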
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d9ef6cf724c28a708de4be12161caabb663da12
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/modeling_mctct.py
@@ -0,0 +1,792 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch M-CTC-T model."""
+
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ....activations import ACT2FN
+from ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
+from ....integrations.deepspeed import is_deepspeed_zero3_enabled
+from ....modeling_attn_mask_utils import _prepare_4d_attention_mask
+from ....modeling_outputs import BaseModelOutput, CausalLMOutput
+from ....modeling_utils import (
+ PreTrainedModel,
+ apply_chunking_to_forward,
+ find_pruneable_heads_and_indices,
+ prune_linear_layer,
+)
+from ....utils import logging
+from .configuration_mctct import MCTCTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_HIDDEN_STATES_START_POSITION = 1
+
+_CONFIG_FOR_DOC = "MCTCTConfig"
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "speechbrain/m-ctc-t-large"
+_EXPECTED_OUTPUT_SHAPE = [1, 195, 1536]
+
+# CTC docstring
+_CTC_EXPECTED_OUTPUT = '"Mr. Quilter is the apostle of the middle classes, and we\'re glad to welcome his gospel."'
+_CTC_EXPECTED_LOSS = 1885.65
+
+
+from .._archive_maps import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class MCTCTConv1dSubsampler(nn.Module):
+ """
+ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
+ via gated linear units (https://arxiv.org/abs/1911.08460)
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.glu_dim = config.conv_glu_dim
+
+ self.dropout = nn.Dropout(config.conv_dropout)
+
+ self.num_layers = config.num_conv_layers
+ self.in_channels = config.input_feat_per_channel * config.input_channels
+
+ if self.num_layers > 1:
+ if config.conv_channels is None:
+ raise ValueError(
+ "Need to specify `conv_channels` configuration in `MCTCTConfig` to use multiple convolution"
+ " layers."
+ )
+
+ self.mid_channels = config.conv_channels
+ else:
+ self.mid_channels = None
+
+ self.out_channels = config.hidden_size * 2 # considering GLU halving
+ self.kernel_size = config.conv_kernel
+ self.stride = config.conv_stride
+
+        # NOTE: MCTCT by construction only uses one convolution kernel. I've made this flexible to allow for
+        # multiple layers of convolutions, but I'm not sure whether this model definition should just restrict it
+        # to one layer. This becomes especially relevant when considering the padding computed in the first line of forward().
+ self.conv_layers = nn.ModuleList(
+ nn.Conv1d(
+ self.in_channels if i == 0 else self.mid_channels[i],
+ self.mid_channels[i] if i < self.num_layers - 1 else self.out_channels,
+ kernel_size=k,
+ stride=self.stride[i],
+ padding="valid",
+ )
+ for i, k in enumerate(self.kernel_size)
+ )
+
+ def forward(self, input_features):
+ # NOTE: in reference to the NOTE in __init__, right now it just calculates padding as if
+ # there will be just one conv layer.
+ padding = sum([size // 2 for size in self.kernel_size]) # (7, 7) -> (3, 3)
+
+ input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), "constant", 0)
+ hidden_states = input_features.transpose(1, 2).contiguous() # -> Batch x Frame x Time
+ for conv in self.conv_layers:
+ hidden_states = conv(hidden_states)
+ hidden_states = nn.functional.glu(hidden_states, dim=self.glu_dim)
+ hidden_states = self.dropout(hidden_states)
+
+ hidden_states = hidden_states.transpose(1, 2).contiguous() # -> Batch x Time x Frame
+ return hidden_states
+
+
+class MCTCTEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ # self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.LayerNorm = MCTCTLayerNorm()
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+ self.register_buffer(
+ "token_type_ids",
+ torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
+ persistent=False,
+ )
+
+ def forward(
+ self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
+ ):
+ input_shape = input_features.size() if input_features is not None else inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+        # If token_type_ids is not provided, default to the buffer registered in the constructor, which is all zeros
+        # (this usually happens when it is auto-generated). The registered buffer helps users trace the model without
+        # passing token_type_ids and solves issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_features)
+
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class MCTCTSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = config.attention_head_dim
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def reshape_fortran(self, x, shape):
+ if len(x.shape) > 0:
+ x = x.permute(*reversed(range(len(x.shape))))
+ return x.reshape(*reversed(shape)).permute(*reversed(range(len(shape))))
+
+ def relative_position_embedding_rotate(self, scores):
+        # NOTE: should re-evaluate whether this re-implementation was truly necessary,
+        # or whether the complete overhaul worked because of some other part
+        # of the code. Adding this and the Fortran-order reshape code seems very undesirable.
+ scores = scores.permute(0, 2, 3, 1) # e.g. [10, 1839, 14, 4]
+
+ batch, hidden_state, seq_len, heads = scores.shape
+
+ # e.g. [10, 1853, 14, 4]
+ scores = torch.cat((scores, torch.zeros((batch, seq_len, seq_len, heads), device=scores.device)), dim=1)
+
+ # e.g. [10, 25942, 1, 4]
+ scores = self.reshape_fortran(scores, [batch, (hidden_state + seq_len) * seq_len, 1, heads])
+
+ # e.g. [10, 25928, 1, 4]
+ scores = scores[:, : (seq_len + hidden_state - 1) * seq_len]
+
+ # e.g. [10, 1852, 14, 4]
+ scores = self.reshape_fortran(scores, [batch, hidden_state + seq_len - 1, seq_len, heads])
+
+ halfpoint = hidden_state // 2
+ scores = scores[:, halfpoint : halfpoint + seq_len].transpose(1, 2) # e.g. [10, 14, 14, 4]
+
+ return scores.permute(0, 3, 1, 2)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ mixed_query_layer = self.query(hidden_states)
+ mixed_query_layer = mixed_query_layer / math.sqrt(self.attention_head_size)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ # relative key position embeddings
+ positional_embedding = self.distance_embedding.weight
+ relative_position_scores = torch.einsum("lh, bche -> bcle", positional_embedding, query_layer.transpose(2, 3))
+
+ relative_position_scores = self.relative_position_embedding_rotate(relative_position_scores)
+ attention_scores = attention_scores + relative_position_scores
+
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the MCTCTModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).flatten(start_dim=-2)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+class MCTCTLayerNorm(nn.Module):
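+    # Despite its name, this module applies a single learned scalar scale and shift (y = w * x + b)
+    # rather than a per-feature layer normalization.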
+ def __init__(self):
+ super().__init__()
+ self.singleton_weight = nn.Parameter(torch.ones(1))
+ self.singleton_bias = nn.Parameter(torch.zeros(1))
+
+ def forward(self, hidden_states):
+ return (hidden_states * self.singleton_weight) + self.singleton_bias
+
+
+class MCTCTSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class MCTCTAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = MCTCTSelfAttention(config)
+ self.output = MCTCTSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+
+class MCTCTIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class MCTCTOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class MCTCTLayer(nn.Module):
+ def __init__(self, config: MCTCTConfig):
+ super().__init__()
+
+ self.seq_len_dim = 1
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+
+ self.intermediate = MCTCTIntermediate(config)
+ self.attention = MCTCTAttention(config)
+ self.is_decoder = config.is_decoder
+ self.output = MCTCTOutput(config)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=False,
+ ):
+ self_attention_outputs = self.attention(
+ hidden_states, attention_mask, head_mask, output_attentions=output_attentions
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class MCTCTPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MCTCTConfig
+ base_model_prefix = "mctct"
+ main_input_name = "input_features"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, MCTCTLayerNorm):
+ module.singleton_weight.data.fill_(1.0)
+ module.singleton_bias.data.zero_()
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+ def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
+ """
+ Computes the output length of the convolutional layers
+ """
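+        # Per layer this is the standard 1D-convolution output-length formula with dilation fixed to 1:
+        #   out = (in + 2 * (kernel_sz // 2) - (kernel_sz - 1) - 1) // stride + 1
+        # e.g. with the default kernel_sz=7, stride=3 this is (in - 1) // 3 + 1, so 1000 input frames -> 334.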
+ dilation = 1
+ for _, kernel_sz, stride in zip(
+ range(self.config.num_conv_layers), self.config.conv_kernel, self.config.conv_stride
+ ):
+ padding = kernel_sz // 2
+ input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1
+ input_lengths = torch.div(input_lengths, stride, rounding_mode="trunc") + 1
+
+ return input_lengths
+
+ def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
+        # generate() creates a 3D attention mask because of the shape of input_features;
+        # convert it to 2D if that's the case
+ if len(attention_mask.shape) > 2:
+ attention_mask = attention_mask[:, :, -1]
+
+ # subsampled_lengths = attention_mask.sum(-1)
+ subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
+ bsz = attention_mask.size()[0]
+ attention_mask = torch.zeros(
+ (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
+ )
+
+        # these two operations make sure that all values
+        # before the output length indices are attended to
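+        # e.g. feature_vector_length=6 and a subsampled length of 4:
+        #   one-hot at index 3 -> [0, 0, 0, 1, 0, 0]
+        #   flip -> [0, 0, 1, 0, 0, 0]; cumsum -> [0, 0, 1, 1, 1, 1]; flip -> [1, 1, 1, 1, 0, 0]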
+ attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
+ return attention_mask
+
+
+MCTCT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`MCTCTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MCTCT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_features (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`Wav2Vec2CTCTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class MCTCTEncoder(MCTCTPreTrainedModel):
+ def __init__(self, config: MCTCTConfig):
+ super().__init__(config)
+ self.hidden_dropout_prob = config.hidden_dropout_prob
+
+ self.layer_norm = MCTCTLayerNorm()
+ self.conv = MCTCTConv1dSubsampler(config)
+ self.layers = nn.ModuleList([MCTCTLayer(config) for _ in range(config.num_hidden_layers)])
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ input_features: torch.Tensor,
+ attention_mask: torch.Tensor,
+ head_mask: torch.Tensor,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ input_features = self.layer_norm(input_features)
+
+ inputs_embeds = self.conv(input_features)
+
+ # subsample attention mask if necessary
+ if attention_mask is not None:
+ attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
+
+ hidden_states = nn.functional.dropout(inputs_embeds, p=self.hidden_dropout_prob, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, "
+ f"but it is for {head_mask.size()[0]}."
+ )
+
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+
+ skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+@add_start_docstrings(
+ "The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.",
+ MCTCT_START_DOCSTRING,
+)
+class MCTCTModel(MCTCTPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.encoder = MCTCTEncoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ modality="audio",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ input_features: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_features is None:
+ raise ValueError("You have to specify input_features.")
+
+ encoder_outputs = self.encoder(
+ input_features,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
+ MCTCT_START_DOCSTRING,
+)
+class MCTCTForCTC(MCTCTPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.mctct = MCTCTModel(config)
+
+ if config.vocab_size is None:
+ raise ValueError(
+ f"You are trying to instantiate {self.__class__} with a configuration that "
+ "does not define the vocabulary size of the language model head. Please "
+ "instantiate the model as follows: `MCTCTForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
+ "or define `vocab_size` of your model's configuration."
+ )
+ output_hidden_size = config.hidden_size
+
+ self.ctc_head = nn.Linear(output_hidden_size, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_CTC_EXPECTED_OUTPUT,
+ expected_loss=_CTC_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_features: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, CausalLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
+ Labels for connectionist temporal classification. Note that `target_length` has to be smaller than or equal to
+ the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
+ All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
+ config.vocab_size - 1]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ outputs = self.mctct(
+ input_features,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+
+ logits = self.ctc_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ if labels.max() >= self.config.vocab_size:
+ raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
+
+ # retrieve loss input_lengths from attention_mask
+ attention_mask = (
+ attention_mask
+ if attention_mask is not None
+ else torch.ones(input_features.shape[:-1], dtype=torch.long)
+ )
+ input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
+ # assuming that padded tokens are filled with -100
+ # when not being attended to
+ labels_mask = labels >= 0
+ target_lengths = labels_mask.sum(-1)
+ flattened_targets = labels.masked_select(labels_mask)
+
+ # ctc_loss doesn't support fp16
+ log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
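+ # after the transpose, log_probs has shape (input_length, batch_size, vocab_size), the time-major layout ctc_loss expects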
+
+ with torch.backends.cudnn.flags(enabled=False):
+ loss = nn.functional.ctc_loss(
+ log_probs,
+ flattened_targets,
+ input_lengths,
+ target_lengths,
+ blank=self.config.pad_token_id,
+ reduction=self.config.ctc_loss_reduction,
+ zero_infinity=self.config.ctc_zero_infinity,
+ )
+
+ if not return_dict:
+ output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e0cbe27dd9be0244d63a23256808cc421fa1fa5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mctct/processing_mctct.py
@@ -0,0 +1,142 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Speech processor class for M-CTC-T
+"""
+import warnings
+from contextlib import contextmanager
+
+from ....processing_utils import ProcessorMixin
+
+
+class MCTCTProcessor(ProcessorMixin):
+ r"""
+ Constructs an MCTCT processor which wraps an MCTCT feature extractor and an MCTCT tokenizer into a single processor.
+
+ [`MCTCTProcessor`] offers all the functionalities of [`MCTCTFeatureExtractor`] and [`AutoTokenizer`]. See the
+ [`~MCTCTProcessor.__call__`] and [`~MCTCTProcessor.decode`] for more information.
+
+ Args:
+ feature_extractor (`MCTCTFeatureExtractor`):
+ An instance of [`MCTCTFeatureExtractor`]. The feature extractor is a required input.
+ tokenizer (`AutoTokenizer`):
+ An instance of [`AutoTokenizer`]. The tokenizer is a required input.
+ """
+
+ feature_extractor_class = "MCTCTFeatureExtractor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+ self.current_processor = self.feature_extractor
+ self._in_target_context_manager = False
+
+ def __call__(self, *args, **kwargs):
+ """
+ When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
+ [`~MCTCTFeatureExtractor.__call__`] and returns its output. If used in the context
+ [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to AutoTokenizer's
+ [`~AutoTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
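+
+ Example (a minimal sketch added for illustration; the checkpoint name and dummy audio are illustrative):
+
+ ```python
+ >>> from transformers import MCTCTProcessor
+
+ >>> processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
+ >>> dummy_audio = [0.0] * 16000  # one second of silence at 16 kHz
+ >>> inputs = processor(audio=dummy_audio, sampling_rate=16000, text="hello world", return_tensors="pt")
+ >>> input_features, labels = inputs["input_features"], inputs["labels"]
+ ```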
+ """
+ # For backward compatibility
+ if self._in_target_context_manager:
+ return self.current_processor(*args, **kwargs)
+
+ if "raw_speech" in kwargs:
+ warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
+ audio = kwargs.pop("raw_speech")
+ else:
+ audio = kwargs.pop("audio", None)
+ sampling_rate = kwargs.pop("sampling_rate", None)
+ text = kwargs.pop("text", None)
+ if len(args) > 0:
+ audio = args[0]
+ args = args[1:]
+
+ if audio is None and text is None:
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
+
+ if audio is not None:
+ inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
+ if text is not None:
+ encodings = self.tokenizer(text, **kwargs)
+
+ if text is None:
+ return inputs
+ elif audio is None:
+ return encodings
+ else:
+ inputs["labels"] = encodings["input_ids"]
+ return inputs
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def pad(self, *args, **kwargs):
+ """
+ When used in normal mode, this method forwards all its arguments to MCTCTFeatureExtractor's
+ [`~MCTCTFeatureExtractor.pad`] and returns its output. If used in the context
+ [`~MCTCTProcessor.as_target_processor`] this method forwards all its arguments to PreTrainedTokenizer's
+ [`~PreTrainedTokenizer.pad`]. Please refer to the docstring of the above two methods for more information.
+ """
+ # For backward compatibility
+ if self._in_target_context_manager:
+ return self.current_processor.pad(*args, **kwargs)
+
+ input_features = kwargs.pop("input_features", None)
+ labels = kwargs.pop("labels", None)
+ if len(args) > 0:
+ input_features = args[0]
+ args = args[1:]
+
+ if input_features is not None:
+ input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
+ if labels is not None:
+ labels = self.tokenizer.pad(labels, **kwargs)
+
+ if labels is None:
+ return input_features
+ elif input_features is None:
+ return labels
+ else:
+ input_features["labels"] = labels["input_ids"]
+ return input_features
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to AutoTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
+ docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @contextmanager
+ def as_target_processor(self):
+ """
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning MCTCT.
+ """
+ warnings.warn(
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
+ "your audio inputs, or in a separate call."
+ )
+ self._in_target_context_manager = True
+ self.current_processor = self.tokenizer
+ yield
+ self.current_processor = self.feature_extractor
+ self._in_target_context_manager = False
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e467090cb4fbfa55ec51ec8232a54180c532ad6c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__init__.py
@@ -0,0 +1,45 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
+
+
+if TYPE_CHECKING:
+ from .configuration_mmbt import MMBTConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4db22a0266c3be48dd236d649d9c6acab50d4c6a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e1a30fcd3124f58bda47a9e7aca78b30cd09ac5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/configuration_mmbt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2888cb387cb35039e8423d879f2f3b8a4c1dff72
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/__pycache__/modeling_mmbt.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py
new file mode 100644
index 0000000000000000000000000000000000000000..df5161b0927ad26279a273216d1d9ab6d465063a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/configuration_mmbt.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Copyright (c) HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MMBT configuration"""
+
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MMBTConfig(object):
+ """
+ This is the configuration class to store the configuration of a [`MMBTModel`]. It is used to instantiate a MMBT
+ model according to the specified arguments, defining the model architecture.
+
+ Args:
+ config ([`PretrainedConfig`]):
+ Config of the underlying Transformer models. Its values are copied over to use a single config.
+ num_labels (`int`, *optional*):
+ Size of final Linear layer for classification.
+ modal_hidden_size (`int`, *optional*, defaults to 2048):
+ Embedding dimension of the non-text modality encoder.
+ """
+
+ def __init__(self, config, num_labels=None, modal_hidden_size=2048):
+ self.__dict__ = config.__dict__
+ self.modal_hidden_size = modal_hidden_size
+ if num_labels:
+ self.num_labels = num_labels
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dc450ce8f6c13346f30e7da045a927a1186e089
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/mmbt/modeling_mmbt.py
@@ -0,0 +1,408 @@
+# coding=utf-8
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Copyright (c) HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch MMBT model."""
+
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss, MSELoss
+
+from ....modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput
+from ....modeling_utils import ModuleUtilsMixin
+from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "MMBTConfig"
+
+
+class ModalEmbeddings(nn.Module):
+ """Generic Modal Embeddings which takes in an encoder, and a transformer embedding."""
+
+ def __init__(self, config, encoder, embeddings):
+ super().__init__()
+ self.config = config
+ self.encoder = encoder
+ self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
+ self.position_embeddings = embeddings.position_embeddings
+ self.token_type_embeddings = embeddings.token_type_embeddings
+ self.word_embeddings = embeddings.word_embeddings
+ self.LayerNorm = embeddings.LayerNorm
+ self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
+
+ def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
+ token_embeddings = self.proj_embeddings(self.encoder(input_modal))
+ seq_length = token_embeddings.size(1)
+
+ if start_token is not None:
+ start_token_embeds = self.word_embeddings(start_token)
+ seq_length += 1
+ token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
+
+ if end_token is not None:
+ end_token_embeds = self.word_embeddings(end_token)
+ seq_length += 1
+ token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)
+
+ if position_ids is None:
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)
+ position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(
+ (input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device
+ )
+
+ position_embeddings = self.position_embeddings(position_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+ embeddings = token_embeddings + position_embeddings + token_type_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+MMBT_START_DOCSTRING = r"""
+ MMBT model was proposed in [Supervised Multimodal Bitransformers for Classifying Images and
+ Text](https://github.com/facebookresearch/mmbt) by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.
+ It's a supervised multimodal bitransformer model that fuses information from text and other image encoders, and
+ obtains state-of-the-art performance on various multimodal classification benchmark tasks.
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`MMBTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration.
+ transformer (`nn.Module`): A text transformer that is used by MMBT.
+ It should have embeddings, encoder, and pooler attributes.
+ encoder (`nn.Module`): Encoder for the second modality.
+ It should take in a batch of modal inputs and return embeddings of shape `(batch_size, modal_sequence_length, modal_hidden_size)`.
+"""
+
+MMBT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_modal (`torch.FloatTensor` of shape `(batch_size, ***)`):
+ The other modality data. Its shape is whatever the encoder for that modality expects, e.g. with an image
+ encoder the shape would be `(batch_size, channels, height, width)`.
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. It does not expect [CLS] token to be added as it's
+ appended to the end of other modality embeddings. Indices can be obtained using [`AutoTokenizer`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ modal_start_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Optional start token to be added to the other modality embedding; `[CLS]` is most commonly used for
+ classification tasks.
+ modal_end_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Optional end token to be added to the other modality embedding; `[SEP]` is most commonly used.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ modal_token_type_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*):
+ Segment token indices to indicate different portions of the non-text modality. The embeddings from these
+ tokens will be summed with the respective token embeddings for the non-text modality.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ modal_position_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.
+ Selected in the range `[0, config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, embedding_dim)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare MMBT Model outputting raw hidden-states without any specific head on top.",
+ MMBT_START_DOCSTRING,
+)
+class MMBTModel(nn.Module, ModuleUtilsMixin):
+ def __init__(self, config, transformer, encoder):
+ super().__init__()
+ self.config = config
+ self.transformer = transformer
+ self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings)
+
+ @add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_modal,
+ input_ids=None,
+ modal_start_tokens=None,
+ modal_end_tokens=None,
+ attention_mask=None,
+ token_type_ids=None,
+ modal_token_type_ids=None,
+ position_ids=None,
+ modal_position_ids=None,
+ head_mask=None,
+ inputs_embeds=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ # For example purposes. Not runnable.
+ transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
+ encoder = ImageEncoder(args)
+ mmbt = MMBTModel(config, transformer, encoder)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_txt_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_txt_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ modal_embeddings = self.modal_encoder(
+ input_modal,
+ start_token=modal_start_tokens,
+ end_token=modal_end_tokens,
+ position_ids=modal_position_ids,
+ token_type_ids=modal_token_type_ids,
+ )
+
+ input_modal_shape = modal_embeddings.size()[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device)
+
+ txt_embeddings = self.transformer.embeddings(
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
+ )
+
+ embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1)
+
+ input_shape = embedding_output.size()[:-1]
+
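+ # the modal embeddings are prepended to the text embeddings, so both masks below are extended with ones for those positions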
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ else:
+ attention_mask = torch.cat(
+ [torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1
+ )
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(input_shape, device=device)
+ else:
+ encoder_attention_mask = torch.cat(
+ [torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1
+ )
+
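+ # broadcast the 2D masks to the 4D additive format consumed by the transformer encoder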
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ encoder_outputs = self.transformer.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.transformer.pooler(sequence_output)
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.transformer.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.transformer.embeddings.word_embeddings = value
+
+
+@add_start_docstrings(
+ """
+ MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
+ """,
+ MMBT_START_DOCSTRING,
+ MMBT_INPUTS_DOCSTRING,
+)
+class MMBTForClassification(nn.Module):
+ r"""
+ **labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`:
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns: *Tuple* comprising various elements depending on the configuration (config) and inputs:
+ **loss**: (*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`:
+ Classification (or regression if `config.num_labels == 1`) loss.
+ **logits**: `torch.FloatTensor` of shape `(batch_size, config.num_labels)`:
+ Classification (or regression if `config.num_labels == 1`) scores (before SoftMax).
+ **hidden_states**: (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one for
+ the output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length, hidden_size)`:
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ **attentions**: (*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for each
+ layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`:
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.
+
+ Examples:
+
+ ```python
+ # For example purposes. Not runnable.
+ transformer = BertModel.from_pretrained("google-bert/bert-base-uncased")
+ encoder = ImageEncoder(args)
+ model = MMBTForClassification(config, transformer, encoder)
+ outputs = model(input_modal, input_ids, labels=labels)
+ loss, logits = outputs[:2]
+ ```"""
+
+ def __init__(self, config, transformer, encoder):
+ super().__init__()
+ self.num_labels = config.num_labels
+ # keep a reference to the config: `forward` reads `self.config.use_return_dict`
+ self.config = config
+
+ self.mmbt = MMBTModel(config, transformer, encoder)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ def forward(
+ self,
+ input_modal,
+ input_ids=None,
+ modal_start_tokens=None,
+ modal_end_tokens=None,
+ attention_mask=None,
+ token_type_ids=None,
+ modal_token_type_ids=None,
+ position_ids=None,
+ modal_position_ids=None,
+ head_mask=None,
+ inputs_embeds=None,
+ labels=None,
+ return_dict=None,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mmbt(
+ input_modal=input_modal,
+ input_ids=input_ids,
+ modal_start_tokens=modal_start_tokens,
+ modal_end_tokens=modal_end_tokens,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ modal_token_type_ids=modal_token_type_ids,
+ position_ids=position_ids,
+ modal_position_ids=modal_position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.num_labels == 1:
+ # We are doing regression
+ loss_fct = MSELoss()
+ loss = loss_fct(logits.view(-1), labels.view(-1))
+ else:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..82bbacd15b0d00509972e16ac406005ee97370f7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ....utils import _LazyModule
+
+
+_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
+
+
+if TYPE_CHECKING:
+ from .tokenization_tapex import TapexTokenizer
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61312159f7b37aa548b782a2d76557603de2ab61
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6705d3d3325263fe002e59b31306c0646203a632
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/__pycache__/tokenization_tapex.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd3d353b526c4a8d4ba033ce8c0ed47137852b30
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/tapex/tokenization_tapex.py
@@ -0,0 +1,1467 @@
+# coding=utf-8
+# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for TAPEX."""
+
+import json
+import os
+import random
+from functools import lru_cache
+from typing import Dict, List, Optional, Tuple, Union
+
+import regex as re
+
+from ....file_utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available
+from ....tokenization_utils import AddedToken, PreTrainedTokenizer
+from ....tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, BatchEncoding, TextInput, TruncationStrategy
+from ....utils import logging
+
+
+if is_pandas_available():
+ import pandas as pd
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
+
+
+class TapexTruncationStrategy(ExplicitEnum):
+ """
+ Possible values for the `truncation` argument in [`~TapexTokenizer.__call__`]. Useful for tab-completion in an IDE.
+ """
+
+ DROP_ROWS_TO_FIT = "drop_rows_to_fit"
+
+
+TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to encode the sequences with the special tokens relative to their model.
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, `str`, [`TapexTruncationStrategy`] or [`~tokenization_utils_base.TruncationStrategy`],
+ *optional*, defaults to `False`):
+
+ Activates and controls truncation. Accepts the following values:
+
+ - `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
+ row by row, removing rows from the table.
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+ to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
+ sequences (or a batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ max_length (`int`, *optional*):
+ Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
+ `None`, this will use the predefined model maximum length if a maximum length is required by one of the
+ truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
+ truncation/padding to a maximum length will be deactivated.
+ stride (`int`, *optional*, defaults to 0):
+ If set to a number along with `max_length`, the overflowing tokens returned when
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
+ argument defines the number of overlapping tokens.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+"""
+
+
+@lru_cache()
+def bytes_to_unicode():
+ """
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
+ characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large #
+ of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset
+ you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe
+ vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
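+ For example, the space byte (32) is not in the printable ranges above, so it is remapped to `chr(2**8 + 32)`,
+ i.e. `"Ġ"`, which is why vocabularies built with this scheme contain `"Ġ"`-prefixed tokens for words that follow a space.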
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
+ strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+class IndexedRowTableLinearize:
+ """
+ FORMAT: col : col1 | col2 | col3 row 1 : val1 | val2 | val3 row 2 : ...
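+
+ For example (an added illustration), `{"header": ["name", "age"], "rows": [["alice", 30]]}` is linearized to
+ `"col : name | age row 1 : alice | 30"`.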
+ """
+
+ def process_table(self, table_content: Dict):
+ """
+ Given a table, TableLinearize aims at converting it into a flattened sequence with special symbols.
+ """
+ assert "header" in table_content and "rows" in table_content, self.PROMPT_MESSAGE
+ # process header
+ table_str = self.process_header(table_content["header"]) + " "
+ # process rows
+ for i, row_example in enumerate(table_content["rows"]):
+ # NOTE: the row should start from row 1 instead of 0
+ table_str += self.process_row(row_example, row_index=i + 1) + " "
+ return table_str.strip()
+
+ def process_header(self, headers: List):
+ """
+ Given a list of headers, TableLinearize aims at converting it into a flattened sequence with special symbols.
+ """
+ return "col : " + " | ".join(headers)
+
+ def process_row(self, row: List, row_index: int):
+ """
+ Given a row, TableLinearize aims at converting it into a flattened sequence with special symbols.
+ """
+ row_str = ""
+ row_cell_values = []
+ for cell_value in row:
+ if isinstance(cell_value, int):
+ row_cell_values.append(str(cell_value))
+ else:
+ row_cell_values.append(cell_value)
+ row_str += " | ".join(row_cell_values)
+ return "row " + str(row_index) + " : " + row_str
+
+
+class TapexTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a TAPEX tokenizer. Based on byte-level Byte-Pair-Encoding (BPE).
+
+ This tokenizer can be used to flatten one or more table(s) and concatenate them with one or more related sentences
+ to be used by TAPEX models. The format that the TAPEX tokenizer creates is the following:
+
+ sentence col : col1 | col2 | col3 row 1 : val1 | val2 | val3 row 2 : ...
+
+ The tokenizer supports a single table + single query, a single table and multiple queries (in which case the table
+ will be duplicated for every query), a single query and multiple tables (in which case the query will be duplicated
+ for every table), and multiple tables and queries. In other words, you can provide a batch of tables + questions to
+ the tokenizer for instance to prepare them for the model.
+
+ Tokenization itself is based on the BPE algorithm. It is identical to the one used by BART, RoBERTa and GPT-2.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows treating the leading word just as any
+ other word. (The BART tokenizer detects the beginning of words by the preceding space.)
+ max_cell_length (`int`, *optional*, defaults to 15):
+ Maximum number of characters per cell when linearizing a table. If this number is exceeded, truncation
+ takes place.
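+
+ Example (a minimal sketch added for illustration; the checkpoint name and table contents are illustrative):
+
+ ```python
+ >>> import pandas as pd
+ >>> from transformers import TapexTokenizer
+
+ >>> tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
+ >>> table = pd.DataFrame.from_dict({"year": [1896, 2012], "city": ["athens", "london"]})
+ >>> encoding = tokenizer(table=table, query="in which year did london host the olympics?", return_tensors="pt")
+ ```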
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ do_lower_case=True,
+ errors="replace",
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ add_prefix_space=False,
+ max_cell_length=15,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+
+ # The mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+ self.do_lower_case = do_lower_case
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+ # additional properties
+
+ super().__init__(
+ vocab_file=vocab_file,
+ merges_file=merges_file,
+ do_lower_case=do_lower_case,
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ max_cell_length=max_cell_length,
+ **kwargs,
+ )
+
+ self.max_cell_length = max_cell_length
+ self.table_linearize = IndexedRowTableLinearize()
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. A TAPEX sequence has the following format:
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. TAPEX does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
+ text = " " + text
+ return (text, kwargs)
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+ return bpe_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]] = None,
+ query: Optional[Union[TextInput, List[TextInput]]] = None,
+ answer: Union[str, List[str]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several table-sequence pair(s).
+
+ Args:
+ table (`pd.DataFrame`, `List[pd.DataFrame]`):
+ Table(s) containing tabular data.
+ query (`str` or `List[str]`, *optional*):
+ Sentence or batch of sentences related to one or more table(s) to be encoded. Note that the number of
+ sentences must match the number of tables.
+ answer (`str` or `List[str]`, *optional*):
+ Optionally, the corresponding answer to the questions as supervision.
+ """
+
+ if table is not None:
+ return self.source_call_func(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ elif answer is not None:
+ return self.target_call_func(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ raise ValueError("You need to provide either a `table` or an `answer`.")
+
+ def source_call_func(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
+ query: Optional[Union[TextInput, List[TextInput]]] = None,
+ answer: Union[str, List[str]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+        # Input type checking for clearer error messages
+ valid_table = False
+ valid_query = False
+
+        # Check that the table has a valid type
+ if isinstance(table, pd.DataFrame):
+ valid_table = True
+ elif isinstance(table, (list, tuple)) and isinstance(table[0], pd.DataFrame):
+ valid_table = True
+
+        # Check that the query has a valid type
+ if query is None or isinstance(query, str):
+ valid_query = True
+ elif isinstance(query, (list, tuple)):
+ if len(query) == 0 or isinstance(query[0], str):
+ valid_query = True
+
+ if not valid_table:
+ raise ValueError(
+ "table input must of type `pd.DataFrame` (single example), `List[pd.DataFrame]` (batch of examples). "
+ )
+ if not valid_query:
+ raise ValueError("query input must of type `str` (single example), `List[str]` (batch of examples). ")
+ is_batched = isinstance(table, (list, tuple)) or isinstance(query, (list, tuple))
+
+ if is_batched:
+ return self.batch_encode_plus(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def batch_encode_plus(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
+ query: Optional[List[TextInput]] = None,
+ answer: List[str] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str] = None,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+
+
+ This method is deprecated, `__call__` should be used instead.
+
+
+ """
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._batch_encode_plus(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _batch_encode_plus(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
+ query: Optional[List[TextInput]] = None,
+ answer: Optional[List[str]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast."
+ )
+
+ if isinstance(table, pd.DataFrame) and isinstance(query, (list, tuple)):
+ # single table, many queries case
+ # duplicate table for every query
+ table = [table] * len(query)
+ if isinstance(table, (list, tuple)) and isinstance(query, str):
+ # many tables, single query case
+ # duplicate query for every table
+ query = [query] * len(table)
+
+ batch_outputs = self._batch_prepare_for_model(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=return_tensors,
+ verbose=verbose,
+ )
+
+ return BatchEncoding(batch_outputs)
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def _batch_prepare_for_model(
+ self,
+ table: Union["pd.DataFrame", List["pd.DataFrame"]],
+ query: Optional[Union[TextInput, List[TextInput]]] = None,
+ answer: Optional[Union[str, List[str]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ ) -> BatchEncoding:
+ """
+ This method adds special tokens, truncates sequences if overflowing while taking into account the special
+ tokens and manages a moving window (with user defined stride) for overflowing tokens.
+ """
+ batch_outputs = {}
+ if answer is None:
+ answer = [None] * len(table)
+ for _table, _query, _answer in zip(table, query, answer):
+ text = self.prepare_table_query(
+ _table, _query, _answer, truncation_strategy=truncation_strategy, max_length=max_length
+ )
+
+ if self.do_lower_case:
+ text = text.lower()
+
+ tokens = self.tokenize(text)
+ outputs = self.prepare_for_model(
+ ids=self.convert_tokens_to_ids(tokens),
+ add_special_tokens=add_special_tokens,
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=None, # we pad in batch afterwards
+ return_attention_mask=False, # we pad in batch afterwards
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=None, # We convert the whole batch to tensors at the end
+ prepend_batch_axis=False,
+ verbose=verbose,
+ )
+
+ for key, value in outputs.items():
+ if key not in batch_outputs:
+ batch_outputs[key] = []
+ batch_outputs[key].append(value)
+
+ batch_outputs = self.pad(
+ batch_outputs,
+ padding=padding_strategy.value,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+
+ return batch_outputs
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
+ def encode(
+ self,
+ table: "pd.DataFrame",
+ query: Optional[TextInput] = None,
+ answer: Optional[str] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> List[int]:
+ """
+ Prepare a table, a string and possible answer for the model. This method does not return token type IDs,
+ attention masks, etc. which are necessary for the model to work correctly. Use this method if you want to build
+ your processing on your own, otherwise refer to `__call__`.
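+
+        A minimal sketch (reusing the `tokenizer` and `table` from the `__call__` example above; illustrative only):
+
+        ```python
+        >>> ids = tokenizer.encode(table, query="In which city were the 2012 games held?")
+        >>> # `ids` is a plain list of input ids; use `__call__` instead to also get the attention mask etc.
+        ```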
+ """
+ encoded_inputs = self.encode_plus(
+ table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+
+ return encoded_inputs["input_ids"]
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def encode_plus(
+ self,
+ table: "pd.DataFrame",
+ query: Optional[TextInput] = None,
+ answer: Optional[str] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str] = None,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._encode_plus(
+ table=table,
+ query=query,
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _encode_plus(
+ self,
+ table: "pd.DataFrame",
+ query: Optional[TextInput] = None,
+ answer: Optional[str] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast. "
+ "More information on available tokenizers at "
+ "https://github.com/huggingface/transformers/pull/2674"
+ )
+
+ text = self.prepare_table_query(
+ table, query, answer, truncation_strategy=truncation_strategy, max_length=max_length
+ )
+
+ # if necessary, perform lower case
+ if self.do_lower_case:
+ text = text.lower()
+
+ tokens = self.tokenize(text)
+
+ return self.prepare_for_model(
+ ids=self.convert_tokens_to_ids(tokens),
+ add_special_tokens=add_special_tokens,
+ padding=padding_strategy.value,
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ prepend_batch_axis=True,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ def target_call_func(
+ self,
+ answer: Union[str, List[str]],
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ The method tokenizes and prepares the answer label for the model.
+
+ Args:
+ answer (`str` or `List[str]`):
+ Corresponding answer supervision to the queries for training the model.
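+
+        A minimal sketch (illustrative values; this path is typically reached through `__call__` with only
+        `answer` set):
+
+        ```python
+        >>> labels = tokenizer(answer=["london", "beijing"], padding=True, return_tensors="pt")
+        >>> # a list of answers is dispatched to `target_batch_encode_plus`, a single string to `target_encode_plus`
+        ```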
+ """
+ is_batched = isinstance(answer, (list, tuple))
+
+ if is_batched:
+ return self.target_batch_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.target_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def target_batch_encode_plus(
+ self,
+ answer: List[str],
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str] = None,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepare answer strings for the model.
+
+ Args:
+            answer (`List[str]`):
+ Corresponding answer supervision to the queries for training the model.
+ """
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._target_batch_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _target_batch_encode_plus(
+ self,
+ answer: List[str],
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ batch_outputs = {}
+ for text in answer:
+ if self.do_lower_case:
+ text = text.lower()
+
+ tokens = self.tokenize(text)
+ outputs = self.prepare_for_model(
+ ids=self.convert_tokens_to_ids(tokens),
+ add_special_tokens=add_special_tokens,
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=None, # we pad in batch afterwards
+ return_attention_mask=False, # we pad in batch afterwards
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=None, # We convert the whole batch to tensors at the end
+ prepend_batch_axis=False,
+ verbose=verbose,
+ )
+
+ for key, value in outputs.items():
+ if key not in batch_outputs:
+ batch_outputs[key] = []
+ batch_outputs[key].append(value)
+
+ batch_outputs = self.pad(
+ batch_outputs,
+ padding=padding_strategy.value,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+        batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+
+        return batch_outputs
+
+ def target_encode(
+ self,
+ answer: str,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> List[int]:
+ """
+ Prepare the answer string for the model. This method does not return token type IDs, attention masks, etc.
+ which are necessary for the model to work correctly. Use this method if you want to build your processing on
+ your own, otherwise refer to `__call__`.
+
+ Args:
+            answer (`str`):
+                Corresponding answer supervision to the queries for training the model.
+ """
+ encoded_outputs = self.target_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+
+ return encoded_outputs["input_ids"]
+
+ def target_encode_plus(
+ self,
+ answer: str,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str] = None,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+        Prepare an answer string for the model.
+
+ Args:
+            answer (`str`):
+ Corresponding answer supervision to the queries for training the model.
+ """
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ return self._target_encode_plus(
+ answer=answer,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _target_encode_plus(
+ self,
+ answer: str,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast. "
+ "More information on available tokenizers at "
+ "https://github.com/huggingface/transformers/pull/2674"
+ )
+
+ text = answer
+
+ # if necessary, perform lower case
+ if self.do_lower_case:
+ text = text.lower()
+
+ tokens = self.tokenize(text)
+
+ return self.prepare_for_model(
+ ids=self.convert_tokens_to_ids(tokens),
+ add_special_tokens=add_special_tokens,
+ padding=padding_strategy.value,
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ prepend_batch_axis=True,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ def prepare_table_query(
+ self,
+ table,
+ query,
+ answer=None,
+        truncation_strategy: Union[str, TruncationStrategy, TapexTruncationStrategy] = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length=None,
+ ):
+ """
+ This method can be used to linearize a table and add a corresponding query.
+
+ Optionally, it also handles truncation of the table (cells).
+
+ An answer can be provided for more precise truncation.
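+
+        A rough sketch of the produced string (the exact layout depends on `self.table_linearize`; the row-wise
+        "col : ... row 1 : ..." format below is only an assumption about the default linearizer):
+
+        ```python
+        >>> import pandas as pd
+        >>> table = pd.DataFrame.from_dict({"year": [2008, 2012], "city": ["Beijing", "London"]})
+        >>> text = tokenizer.prepare_table_query(table, query="In which city were the 2012 games held?")
+        >>> # `text` is roughly:
+        >>> # "In which city were the 2012 games held? col : year | city row 1 : 2008 | Beijing row 2 : 2012 | London"
+        ```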
+ """
+ if not table.empty:
+ # step 1: create table dictionary
+ table_content = {"header": list(table.columns), "rows": [list(row.values) for i, row in table.iterrows()]}
+
+ # step 2: modify table internally
+ # always truncate table cells based on self.max_cell_length
+ # optionally truncate rows if truncation_strategy is set to it
+ self.truncate_table_cells(table_content, query, answer)
+ if truncation_strategy == TapexTruncationStrategy.DROP_ROWS_TO_FIT:
+ self.truncate_table_rows(table_content, query, answer, max_length=max_length)
+
+ # step 3: linearize table
+ linear_table = self.table_linearize.process_table(table_content)
+ else:
+ linear_table = ""
+
+ if linear_table == "":
+ logger.warning(
+ "You provide an empty table, or all cells contain much tokens (e.g., >= 1024 tokens). "
+ + f"Please carefully check the corresponding table with the query : {query}."
+ )
+ if query == "":
+ logger.warning("You provide nothing to query with respect to the table.")
+ # step 4: concatenate query with linear_table
+ separator = " " if query and linear_table else ""
+ joint_input = (query + separator + linear_table) if query else linear_table
+
+ return joint_input
+
+ def truncate_table_cells(self, table_content: Dict, question: str, answer: List):
+ # TODO (Qian): is it possible to revert the original cell if it is in the final answer?
+ cell_mapping = {}
+ for row in table_content["rows"]:
+ for i, cell in enumerate(row):
+ truncate_cell = self.truncate_cell(cell)
+ if truncate_cell is not None:
+ cell_mapping[cell] = truncate_cell
+ row[i] = truncate_cell
+
+ # modify the answer list
+ if answer is not None:
+ for i, case in enumerate(answer):
+ if case in cell_mapping.keys():
+ answer[i] = cell_mapping[case]
+
+ def truncate_cell(self, cell_value):
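+        """
+        Truncate a single cell value to at most `self.max_cell_length` tokens. Numeric values and empty strings
+        are returned unchanged; `None` means the cell already fits and does not need to be replaced.
+        """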
+        # do not process numeric cell values
+ if isinstance(cell_value, int) or isinstance(cell_value, float):
+ return cell_value
+ if cell_value.strip() != "":
+ try_tokens = self.tokenize(cell_value)
+ if len(try_tokens) >= self.max_cell_length:
+ retain_tokens = try_tokens[: self.max_cell_length]
+ retain_cell_value = self.convert_tokens_to_string(retain_tokens)
+ return retain_cell_value
+ else:
+ return None
+ else:
+ return cell_value
+
+ def truncate_table_rows(
+ self, table_content: Dict, question: str, answer: Optional[Union[str, List[str]]] = None, max_length=None
+ ):
+ """
+ Args:
+            table_content:
+                a dict of the form {"header": ..., "rows": ..., "id" (optional): ...}
+
+            question:
+                the natural language question
+
+            answer:
+                the answer supervision used during training; empty or `None` otherwise
+ """
+ delete_ratio, remain_token_len = self.estimate_delete_ratio(table_content, question, max_length)
+ # randomly delete unrelated rows
+ self.delete_unrelated_rows(table_content, question, answer, delete_ratio)
+ # guarantee the result < max_length
+ maximum_keep_rows = 0
+ for ind, row_example in enumerate(table_content["rows"]):
+ value_string = self.table_linearize.process_row(row_example, ind + 1)
+ value_token_len = len(self.tokenize(value_string))
+ # over the size limit, and take action
+ if value_token_len > remain_token_len:
+ break
+ remain_token_len -= value_token_len
+ maximum_keep_rows += 1
+ del table_content["rows"][maximum_keep_rows:]
+
+ def estimate_delete_ratio(self, table_content: Dict, question: str, max_length=None):
+ if "header" not in table_content or "rows" not in table_content:
+ raise ValueError("The table content should contain both 'header' and 'rows' keys.")
+        # calculate the tokens of the question; special tokens will only be prepended to the question
+ question_tokens = self.tokenize(question, add_special_tokens=True)
+ # calculate the tokens of header
+ header_string = self.table_linearize.process_header(table_content["header"])
+ header_tokens = self.tokenize(header_string, add_special_tokens=False)
+        # tokens already consumed by the question and the header
+ used_token_len = len(question_tokens) + len(header_tokens)
+ # remaining token space for rows
+ remain_token_len = max_length - used_token_len
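+        # Illustrative numbers: with max_length=1024 and a question plus header using 60 tokens,
+        # remain_token_len is 964; if all rows together tokenize to roughly 1500 tokens, the delete
+        # ratio computed below is 1 - 964 / 1500 ≈ 0.36.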
+
+ value_string = ""
+ for _, row_example in enumerate(table_content["rows"]):
+            # use a fixed dummy row index (100) to roughly estimate the overall token length
+ value_string += self.table_linearize.process_row(row_example, 100) + " "
+ value_token_len = len(self.tokenize(value_string))
+
+ if value_token_len < remain_token_len:
+ # no row will be deleted
+ return 0.0, remain_token_len
+ else:
+ # calc a roughly delete rate
+ return 1.0 - remain_token_len / value_token_len, remain_token_len
+
+ def delete_unrelated_rows(self, table_content: Dict, question: str, answer: List, delete_ratio: float):
+ """
+ The argument answer is used only during training.
+ """
+ truncated_unrelated_indices = []
+ related_indices = []
+ if answer is None or len(answer) == 0:
+ answer_set = set()
+ else:
+ answer_set = {ans_ex.lower() for ans_ex in answer}
+ # add question key words into answer set
+ if question is not None:
+ answer_set.update(question.split())
+ question_set = set(question.strip("?!.,").split(" "))
+ row_max_len = len(table_content["rows"])
+ for _row_idx, row in enumerate(table_content["rows"]):
+ lower_row = {str(cell).lower() for cell in row}
+ if len(lower_row & answer_set) == 0 and len(lower_row & question_set) == 0:
+ truncated_unrelated_indices.append(_row_idx)
+ else:
+ # add neighbours to preserve information aggressively
+ related_indices.extend([_row_idx - 2, _row_idx - 1, _row_idx, _row_idx + 1, _row_idx + 2])
+
+ # remove the neighbours
+ truncated_unrelated_indices = [
+ _row_idx for _row_idx in truncated_unrelated_indices if _row_idx not in related_indices
+ ]
+ # select some cases to drop
+ drop_items = min(len(truncated_unrelated_indices), int(len(table_content["rows"]) * delete_ratio))
+ drop_row_indices = random.choices(truncated_unrelated_indices, k=drop_items)
+
+ for _row_idx in reversed(range(row_max_len)):
+ if _row_idx in drop_row_indices:
+ del table_content["rows"][_row_idx]
+
+        # log a warning whenever rows have actually been dropped (only possible if the table provides an "id")
+        if "id" in table_content and len(drop_row_indices) > 0:
+            logger.warning("Deleted {} rows from table {}".format(len(drop_row_indices), table_content["id"]))
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7af1bb48cb7d6a495611b0dadfc910779262813
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_trajectory_transformer": [
+ "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "TrajectoryTransformerConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_trajectory_transformer"] = [
+ "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TrajectoryTransformerModel",
+ "TrajectoryTransformerPreTrainedModel",
+ "load_tf_weights_in_trajectory_transformer",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_trajectory_transformer import (
+ TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ TrajectoryTransformerConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_trajectory_transformer import (
+ TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TrajectoryTransformerModel,
+ TrajectoryTransformerPreTrainedModel,
+ load_tf_weights_in_trajectory_transformer,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4cc131c258eeaf0170bc9fac690992d910d98237
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f510464e98d73c9aa7da04c63a5e914ed87f2166
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/configuration_trajectory_transformer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4eb23f26a2cc2abafeb406233b87e5e947545b77
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..280fafe1f85860d9810229c1558257772f8f899a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/__pycache__/modeling_trajectory_transformer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..eccb71fcc429e742c2df458f5c831f8ca2df451d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py
@@ -0,0 +1,155 @@
+# coding=utf-8
+# Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TrajectoryTransformer model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from .._archive_maps import TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TrajectoryTransformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`TrajectoryTransformerModel`]. It is used to
+    instantiate a TrajectoryTransformer model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
+ TrajectoryTransformer
+ [CarlCochet/trajectory-transformer-halfcheetah-medium-v2](https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 100):
+ Vocabulary size of the TrajectoryTransformer model. Defines the number of different tokens that can be
+ represented by the `trajectories` passed when calling [`TrajectoryTransformerModel`]
+ action_weight (`int`, *optional*, defaults to 5):
+ Weight of the action in the loss function
+ reward_weight (`int`, *optional*, defaults to 1):
+ Weight of the reward in the loss function
+ value_weight (`int`, *optional*, defaults to 1):
+ Weight of the value in the loss function
+ block_size (`int`, *optional*, defaults to 249):
+ Size of the blocks in the trajectory transformer.
+ action_dim (`int`, *optional*, defaults to 6):
+ Dimension of the action space.
+ observation_dim (`int`, *optional*, defaults to 17):
+ Dimension of the observation space.
+ transition_dim (`int`, *optional*, defaults to 25):
+ Dimension of the transition space.
+ n_layer (`int`, *optional*, defaults to 4):
+ Number of hidden layers in the Transformer encoder.
+ n_head (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ n_embd (`int`, *optional*, defaults to 128):
+ Dimensionality of the embeddings and hidden states.
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
+ The dropout ratio for the embeddings.
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+        kaiming_initializer_range (`float`, *optional*, defaults to 1):
+ A coefficient scaling the negative slope of the kaiming initializer rectifier for EinLinear layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ Example:
+
+ ```python
+ >>> from transformers import TrajectoryTransformerConfig, TrajectoryTransformerModel
+
+ >>> # Initializing a TrajectoryTransformer CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration
+ >>> configuration = TrajectoryTransformerConfig()
+
+ >>> # Initializing a model (with random weights) from the CarlCochet/trajectory-transformer-halfcheetah-medium-v2 style configuration
+ >>> model = TrajectoryTransformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "trajectory_transformer"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "hidden_size": "n_embd",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=100,
+ action_weight=5,
+ reward_weight=1,
+ value_weight=1,
+ block_size=249,
+ action_dim=6,
+ observation_dim=17,
+ transition_dim=25,
+ n_layer=4,
+ n_head=4,
+ n_embd=128,
+ embd_pdrop=0.1,
+ attn_pdrop=0.1,
+ resid_pdrop=0.1,
+ learning_rate=0.0006,
+ max_position_embeddings=512,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ kaiming_initializer_range=1,
+ use_cache=True,
+ pad_token_id=1,
+ bos_token_id=50256,
+ eos_token_id=50256,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.action_weight = action_weight
+ self.reward_weight = reward_weight
+ self.value_weight = value_weight
+ self.max_position_embeddings = max_position_embeddings
+ self.block_size = block_size
+ self.action_dim = action_dim
+ self.observation_dim = observation_dim
+ self.transition_dim = transition_dim
+ self.learning_rate = learning_rate
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.n_embd = n_embd
+ self.embd_pdrop = embd_pdrop
+ self.attn_pdrop = attn_pdrop
+ self.resid_pdrop = resid_pdrop
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.kaiming_initializer_range = kaiming_initializer_range
+ self.use_cache = use_cache
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..622552fa78360826fc976d6f1d8c97fcc74a8a38
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,70 @@
+# coding=utf-8
+# Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TrajectoryTransformer pytorch checkpoint conversion"""
+
+import torch
+import trajectory.utils as utils
+
+from transformers import TrajectoryTransformerModel
+
+
+class Parser(utils.Parser):
+ dataset: str = "halfcheetah-medium-expert-v2"
+ config: str = "config.offline"
+
+
+def convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(logbase, dataset, loadpath, epoch, device):
+ """Converting Sequential blocks to ModuleList"""
+
+ gpt, gpt_epoch = utils.load_model(logbase, dataset, loadpath, epoch=epoch, device=device)
+ trajectory_transformer = TrajectoryTransformerModel(gpt.config)
+
+ trajectory_transformer.tok_emb.load_state_dict(gpt.tok_emb.state_dict())
+ trajectory_transformer.pos_emb = gpt.pos_emb
+ trajectory_transformer.drop.load_state_dict(gpt.drop.state_dict())
+ trajectory_transformer.ln_f.load_state_dict(gpt.ln_f.state_dict())
+ trajectory_transformer.head.load_state_dict(gpt.head.state_dict())
+
+ for i, block in enumerate(gpt.blocks):
+ trajectory_transformer.blocks[i].ln1.load_state_dict(gpt.blocks[i].ln1.state_dict())
+ trajectory_transformer.blocks[i].ln2.load_state_dict(gpt.blocks[i].ln2.state_dict())
+ trajectory_transformer.blocks[i].attn.load_state_dict(gpt.blocks[i].attn.state_dict())
+
+ trajectory_transformer.blocks[i].l1.load_state_dict(gpt.blocks[i].mlp[0].state_dict())
+ trajectory_transformer.blocks[i].act.load_state_dict(gpt.blocks[i].mlp[1].state_dict())
+ trajectory_transformer.blocks[i].l2.load_state_dict(gpt.blocks[i].mlp[2].state_dict())
+ trajectory_transformer.blocks[i].drop.load_state_dict(gpt.blocks[i].mlp[3].state_dict())
+
+ torch.save(trajectory_transformer.state_dict(), "pytorch_model.bin")
+
+
+if __name__ == "__main__":
+ """
+    To run this script you will need to install the original repository of the model, which you can find here:
+    https://github.com/jannerm/trajectory-transformer. From that repository you can also download the original
+    PyTorch checkpoints.
+
+ Run with the command:
+
+ ```sh
+    >>> python convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py --dataset <dataset_name>
+    ... --gpt_loadpath <path_to_original_checkpoint>
+ ```
+ """
+
+ args = Parser().parse_args("plan")
+ convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(
+ args.logbase, args.dataset, args.gpt_loadpath, args.gpt_epoch, args.device
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c98aa45dc2739336b75e72167c739160cf5d11f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py
@@ -0,0 +1,606 @@
+# coding=utf-8
+# Copyright 2022 The Trajectory Transformers paper authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch TrajectoryTransformer model."""
+
+import math
+import os
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import functional as F
+
+from ....modeling_utils import PreTrainedModel
+from ....utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_trajectory_transformer import TrajectoryTransformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
+_CONFIG_FOR_DOC = "TrajectoryTransformerConfig"
+
+
+from .._archive_maps import TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def load_tf_weights_in_trajectory_transformer(model, config, tf_checkpoint_path):
+ """Load tf checkpoints in a pytorch model."""
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array)
+
+ for name, array in zip(names, arrays):
+ name = name.split("/")
+        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+        # which are not required for using the pretrained model
+ if any(
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+ for n in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ pointer = model
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+ scope_names = re.split(r"_(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "squad":
+ pointer = getattr(pointer, "classifier")
+ else:
+ try:
+ pointer = getattr(pointer, scope_names[0])
+ except AttributeError:
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+ if m_name[-11:] == "_embeddings":
+ pointer = getattr(pointer, "weight")
+ elif m_name == "kernel":
+ array = np.transpose(array)
+ try:
+ if pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
+        except ValueError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+@dataclass
+class TrajectoryTransformerOutput(ModelOutput):
+ """
+ Base class for model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
+ sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the
+ attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. GPT2Attentions weights after the attention softmax, used to compute the weighted average
+ in the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class TrajectoryTransformerPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TrajectoryTransformerConfig
+ load_tf_weights = load_tf_weights_in_trajectory_transformer
+ base_model_prefix = "trajectory_transformer"
+ main_input_name = "trajectories"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ if isinstance(module, (nn.Linear, nn.Embedding)):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if isinstance(module, nn.Linear) and module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, EinLinear):
+ for i in range(module.n_models):
+ nn.init.kaiming_uniform_(module.weight[i], a=math.sqrt(5) / self.config.kaiming_initializer_range)
+ if module.bias is not None:
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight[i])
+ bound = (1 / math.sqrt(fan_in)) * self.config.initializer_range
+ nn.init.uniform_(module.bias[i], -bound, bound)
+
+
+TRAJECTORY_TRANSFORMER_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`TrajectoryTransformerConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ trajectories (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Batch of trajectories, where a trajectory is a sequence of states, actions and rewards.
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`, *optional*):
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
+ targets (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Desired targets used to compute the loss.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class EinLinear(nn.Module):
+ def __init__(self, n_models, in_features, out_features, bias):
+ super().__init__()
+ self.n_models = n_models
+ self.out_features = out_features
+ self.in_features = in_features
+ self.weight = nn.Parameter(torch.Tensor(n_models, out_features, in_features))
+ if bias:
+ self.bias = nn.Parameter(torch.Tensor(n_models, out_features))
+ else:
+ self.register_parameter("bias", None)
+
+ def reset_parameters(self):
+ for i in range(self.n_models):
+ nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5))
+ if self.bias is not None:
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i])
+ bound = 1 / math.sqrt(fan_in)
+ nn.init.uniform_(self.bias[i], -bound, bound)
+
+ def forward(self, input):
+ """
+ Args:
+ input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`):
+ The input to the layer.
+ """
+ # [ batch_size x n_models x output_dim ]
+ output = torch.einsum("eoi,bei->beo", self.weight, input)
+ if self.bias is not None:
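+            # Note (added comment): bias addition is not implemented in this forward pass; the decoder
+            # head in this file is constructed with `bias=False`, so this branch is not expected to be reached.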
+ raise RuntimeError()
+ return output
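+
+# Illustrative usage sketch for the `EinLinear` module above (comments only, not part of the original
+# code; the shapes are assumed for the example):
+#
+#     layer = EinLinear(n_models=4, in_features=8, out_features=16, bias=False)
+#     layer.reset_parameters()
+#     out = layer(torch.randn(2, 4, 8))  # one independent projection per slot -> shape (2, 4, 16)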
+
+
+class CausalSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+
+ if config.n_embd % config.n_head != 0:
+ raise ValueError(f"n_head ({config.n_head}) should be a divisor of n_embd ({config.n_embd})")
+
+ # key, query, value projections for all heads
+ self.key = nn.Linear(config.n_embd, config.n_embd)
+ self.query = nn.Linear(config.n_embd, config.n_embd)
+ self.value = nn.Linear(config.n_embd, config.n_embd)
+
+ # regularization
+ self.attn_drop = nn.Dropout(config.attn_pdrop)
+ self.resid_drop = nn.Dropout(config.resid_pdrop)
+
+ # output projection
+ self.proj = nn.Linear(config.n_embd, config.n_embd)
+
+ # causal mask to ensure that attention is only applied to the left in the input sequence
+ self.register_buffer(
+ "mask",
+ torch.tril(torch.ones(config.block_size, config.block_size)).view(
+ 1, 1, config.block_size, config.block_size
+ ),
+ persistent=False,
+ )
+
+ # mask previous value estimates
+ joined_dim = config.observation_dim + config.action_dim + 2
+ self.mask.squeeze()[:, joined_dim - 1 :: joined_dim] = 0
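+        # For illustration: with observation_dim=17 and action_dim=6, joined_dim is 25 and columns
+        # 24, 49, 74, ... of the causal mask are zeroed, so no token attends to previous value estimates.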
+
+ self.n_head = config.n_head
+
+ def forward(
+ self,
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ):
+ batch_size, sequence_length, embedding_dim = hidden_states.size()
+
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+ # [ batch_size x n_heads x sequence_length x head_dim ]
+ key = (
+ self.key(hidden_states)
+ .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
+ .transpose(1, 2)
+ )
+ query = (
+ self.query(hidden_states)
+ .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
+ .transpose(1, 2)
+ )
+ value = (
+ self.value(hidden_states)
+ .view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head)
+ .transpose(1, 2)
+ )
+
+ if layer_past is not None:
+ past_key, past_value = layer_past
+ key = torch.cat((past_key, key), dim=-2)
+ value = torch.cat((past_value, value), dim=-2)
+
+ if use_cache is True:
+ present = (key, value)
+ else:
+ present = None
+
+ # causal self-attention
+ # [ batch_size x n_heads x sequence_length x sequence_length ]
+ attn_weights = (torch.matmul(query, key.transpose(-2, -1))) * (1.0 / math.sqrt(key.size(-1)))
+ attn_weights = attn_weights.masked_fill(
+ self.mask[:, :, :sequence_length, :sequence_length] == 0, torch.finfo(attn_weights.dtype).min
+ )
+ attn_weights = F.softmax(attn_weights, dim=-1)
+ self._attn_map = attn_weights.clone()
+ attn_weights = self.attn_drop(attn_weights)
+
+ output = torch.matmul(attn_weights, value)
+ # [ batch_size x sequence_length x embedding_dim ]
+ # re-assemble all head outputs side by side
+ output = output.transpose(1, 2).contiguous().view(batch_size, sequence_length, embedding_dim)
+
+ # output projection
+ output = self.resid_drop(self.proj(output))
+
+ outputs = (output, present)
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class Block(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.ln1 = nn.LayerNorm(config.n_embd)
+ self.ln2 = nn.LayerNorm(config.n_embd)
+ self.attn = CausalSelfAttention(config)
+
+ # MLP
+ self.l1 = nn.Linear(config.n_embd, 4 * config.n_embd)
+ self.act = nn.GELU()
+ self.l2 = nn.Linear(4 * config.n_embd, config.n_embd)
+ self.drop = nn.Dropout(config.resid_pdrop)
+
+ def forward(
+ self,
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ):
+ residual = hidden_states
+ hidden_states = self.ln1(hidden_states)
+
+ attn_outputs = self.attn(
+ hidden_states, layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions
+ )
+ attn_output = attn_outputs[0]
+ outputs = attn_outputs[1:]
+ hidden_states = attn_output + residual
+
+ residual = hidden_states
+ hidden_states = self.ln2(hidden_states)
+ hidden_states = self.l1(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.l2(hidden_states)
+ hidden_states = residual + self.drop(hidden_states)
+
+ if use_cache:
+ outputs = (hidden_states,) + outputs
+ else:
+ outputs = (hidden_states,) + outputs[1:]
+
+ return outputs
+
+
+@add_start_docstrings(
+ "The bare TrajectoryTransformer Model transformer outputting raw hidden-states without any specific head on top.",
+ TRAJECTORY_TRANSFORMER_START_DOCSTRING,
+)
+class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel):
+ """the full GPT language model, with a context size of block_size"""
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ # input embedding stem (+1 for stop token)
+ self.tok_emb = nn.Embedding(config.vocab_size * config.transition_dim + 1, config.n_embd)
+
+ self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
+ self.drop = nn.Dropout(config.embd_pdrop)
+ # transformer
+ self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])
+ # decoder head
+ self.ln_f = nn.LayerNorm(config.n_embd)
+ self.head = EinLinear(config.transition_dim, config.n_embd, config.vocab_size + 1, bias=False)
+
+ self.vocab_size = config.vocab_size
+ self.stop_token = config.vocab_size * config.transition_dim
+ self.block_size = config.block_size
+
+ self.observation_dim = config.observation_dim
+ self.action_dim = config.action_dim
+ self.transition_dim = config.transition_dim
+ self.embedding_dim = config.n_embd
+
+ self.action_weight = config.action_weight
+ self.reward_weight = config.reward_weight
+ self.value_weight = config.value_weight
+
+ self.gradient_checkpointing = False
+
+ self.post_init()
+
+ def get_block_size(self):
+ return self.block_size
+
+ def offset_tokens(self, trajectories):
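+        # Illustrative note (added comment): each slot within a transition gets its own vocabulary range,
+        # e.g. with vocab_size=100 and transition_dim=4 the raw tokens [3, 7, 1, 9] become
+        # [3, 107, 201, 309], and raw tokens equal to vocab_size map to the shared stop token
+        # vocab_size * transition_dim.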
+ _, sequence_length = trajectories.shape
+
+ n_states = int(np.ceil(sequence_length / self.transition_dim))
+
+ offsets = torch.arange(self.transition_dim) * self.vocab_size
+ offsets = offsets.repeat(n_states).to(trajectories.device)
+
+ offset_trajectories = trajectories + offsets[:sequence_length]
+ offset_trajectories[trajectories == self.vocab_size] = self.stop_token
+ return offset_trajectories
+
+ def pad_to_full_observation(self, hidden_states):
+ batch_size, sequence_length, _ = hidden_states.shape
+
+ n_pad = (self.transition_dim - sequence_length % self.transition_dim) % self.transition_dim
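+        # e.g. with transition_dim=25 and sequence_length=24, n_pad is 1; when sequence_length is already
+        # a multiple of transition_dim, n_pad is 0 and no padding rows are added.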
+ padding = torch.zeros(batch_size, n_pad, self.embedding_dim, device=hidden_states.device)
+
+ # [ batch_size x padded_sequence_length' x embedding_dim ]
+ hidden_states_pad = torch.cat([hidden_states, padding], dim=1)
+ hidden_states_pad = hidden_states_pad.view(-1, self.transition_dim, self.embedding_dim)
+
+ return hidden_states_pad, n_pad
+
+ @add_start_docstrings_to_model_forward(
+ TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")
+ )
+ @replace_return_docstrings(output_type=TrajectoryTransformerOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ trajectories: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ targets: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TrajectoryTransformerOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+        ```python
+        >>> from transformers import TrajectoryTransformerModel
+        >>> import numpy as np
+        >>> import torch
+
+        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        >>> model = TrajectoryTransformerModel.from_pretrained(
+        ...     "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
+        ... )
+        >>> model.to(device)
+        >>> model.eval()
+
+        >>> observations_dim, action_dim, batch_size = 17, 6, 256
+        >>> seq_length = observations_dim + action_dim + 1
+
+        >>> trajectories = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(
+        ...     device
+        ... )
+        >>> targets = torch.LongTensor([np.random.permutation(seq_length) for _ in range(batch_size)]).to(device)
+
+ >>> outputs = model(
+ ... trajectories,
+ ... targets=targets,
+ ... use_cache=True,
+ ... output_attentions=True,
+ ... output_hidden_states=True,
+ ... return_dict=True,
+ ... )
+ ```
+ """
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if past_key_values is None:
+ past_key_values = tuple([None] * len(self.blocks))
+
+ batch_size, sequence_length = trajectories.size()
+
+ if sequence_length > self.block_size:
+ raise ValueError("Cannot forward, model block size is exhausted.")
+
+ offset_trajectories = self.offset_tokens(trajectories)
+ # [ batch_size x sequence_length x embedding_dim ]
+ # forward the GPT model
+ token_embeddings = self.tok_emb(offset_trajectories) # each index maps to a (learnable) vector
+ position_embeddings = self.pos_emb[:, :sequence_length, :] # each position maps to a (learnable) vector
+
+ hidden_states = self.drop(token_embeddings + position_embeddings)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ presents = () if use_cache else None
+ all_self_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ for i, (block, layer_past) in enumerate(zip(self.blocks, past_key_values)):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ outputs = self._gradient_checkpointing_func(
+ block.__call__,
+ hidden_states,
+ layer_past,
+ use_cache,
+ output_attentions,
+ )
+ else:
+ outputs = block(hidden_states, layer_past, use_cache, output_attentions)
+
+ hidden_states = outputs[0]
+ if use_cache is True:
+ presents = presents + (outputs[1],)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+
+ # [ batch_size x sequence_length x embedding_dim ]
+ hidden_state = self.ln_f(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ hidden_states_pad, n_pad = self.pad_to_full_observation(hidden_state)
+
+ logits = self.head(hidden_states_pad)
+ logits = logits.reshape(batch_size, sequence_length + n_pad, self.vocab_size + 1)
+ logits = logits[:, :sequence_length]
+
+ # if we are given some desired targets also calculate the loss
+ if targets is not None:
+ loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1), reduction="none")
+ if self.action_weight != 1 or self.reward_weight != 1 or self.value_weight != 1:
+ # make weights
+ n_states = int(np.ceil(sequence_length / self.transition_dim))
+ weights = torch.cat(
+ [
+ torch.ones(self.observation_dim, device=trajectories.device),
+ torch.ones(self.action_dim, device=trajectories.device) * self.action_weight,
+ torch.ones(1, device=trajectories.device) * self.reward_weight,
+ torch.ones(1, device=trajectories.device) * self.value_weight,
+ ]
+ )
+ weights = weights.repeat(n_states)
+ weights = weights[1:].repeat(batch_size, 1)
+ loss = loss * weights.view(-1)
+ loss = (loss * attention_mask.view(-1)).mean()
+ else:
+ loss = None
+
+ if not return_dict:
+ return tuple(v for v in [loss, logits, presents, all_hidden_states, all_self_attentions] if v is not None)
+
+ return TrajectoryTransformerOutput(
+ loss=loss,
+ logits=logits,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..50bf94ae7ea3983d40dcf3d03c2e4e1027c23c80
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py
@@ -0,0 +1,189 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Transformer XL configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from .._archive_maps import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TransfoXLConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`TransfoXLModel`] or a [`TFTransfoXLModel`]. It is
+ used to instantiate a Transformer-XL model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the TransfoXL
+ [transfo-xl/transfo-xl-wt103](https://huggingface.co/transfo-xl/transfo-xl-wt103) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+        vocab_size (`int`, *optional*, defaults to 267735):
+            Vocabulary size of the Transformer-XL model. Defines the number of different tokens that can be
+            represented by the `input_ids` passed when calling [`TransfoXLModel`] or [`TFTransfoXLModel`].
+ cutoffs (`List[int]`, *optional*, defaults to `[20000, 40000, 200000]`):
+ Cutoffs for the adaptive softmax.
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimensionality of the model's hidden states.
+        d_embed (`int`, *optional*, defaults to 1024):
+            Dimensionality of the embeddings.
+ n_head (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ d_head (`int`, *optional*, defaults to 64):
+ Dimensionality of the model's heads.
+        d_inner (`int`, *optional*, defaults to 4096):
+            Inner dimension of the feed-forward layers.
+        div_val (`int`, *optional*, defaults to 4):
+            Divisor value for the adaptive input and softmax.
+        pre_lnorm (`boolean`, *optional*, defaults to `False`):
+            Whether or not to apply LayerNorm to the input instead of the output in the blocks.
+        n_layer (`int`, *optional*, defaults to 18):
+            Number of hidden layers in the Transformer encoder.
+        mem_len (`int`, *optional*, defaults to 1600):
+            Length of the retained previous hidden states (the memory).
+        clamp_len (`int`, *optional*, defaults to 1000):
+            Use the same positional embeddings after `clamp_len` steps.
+        same_length (`boolean`, *optional*, defaults to `True`):
+            Whether or not to use the same attention length for all tokens.
+        proj_share_all_but_first (`boolean`, *optional*, defaults to `True`):
+            Whether to share all projection layers except the first one.
+ attn_type (`int`, *optional*, defaults to 0):
+ Attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
+ sample_softmax (`int`, *optional*, defaults to -1):
+ Number of samples in the sampled softmax.
+ adaptive (`boolean`, *optional*, defaults to `True`):
+ Whether or not to use adaptive softmax.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ dropatt (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+        untie_r (`boolean`, *optional*, defaults to `True`):
+            Whether or not to untie relative position biases.
+ init (`str`, *optional*, defaults to `"normal"`):
+ Parameter initializer to use.
+ init_range (`float`, *optional*, defaults to 0.01):
+ Parameters initialized by U(-init_range, init_range).
+        proj_init_std (`float`, *optional*, defaults to 0.01):
+            Projection parameters initialized by N(0, proj_init_std).
+        init_std (`float`, *optional*, defaults to 0.02):
+            Parameters initialized by N(0, init_std).
+        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+            The epsilon to use in the layer normalization layers.
+ eos_token_id (`int`, *optional*, defaults to 0):
+ End of stream token id.
+
+ Examples:
+
+ ```python
+ >>> from transformers import TransfoXLConfig, TransfoXLModel
+
+ >>> # Initializing a Transformer XL configuration
+ >>> configuration = TransfoXLConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = TransfoXLModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "transfo-xl"
+ keys_to_ignore_at_inference = ["mems"]
+ attribute_map = {
+ "n_token": "vocab_size",
+ "hidden_size": "d_model",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=267735,
+ cutoffs=[20000, 40000, 200000],
+ d_model=1024,
+ d_embed=1024,
+ n_head=16,
+ d_head=64,
+ d_inner=4096,
+ div_val=4,
+ pre_lnorm=False,
+ n_layer=18,
+ mem_len=1600,
+ clamp_len=1000,
+ same_length=True,
+ proj_share_all_but_first=True,
+ attn_type=0,
+ sample_softmax=-1,
+ adaptive=True,
+ dropout=0.1,
+ dropatt=0.0,
+ untie_r=True,
+ init="normal",
+ init_range=0.01,
+ proj_init_std=0.01,
+ init_std=0.02,
+ layer_norm_epsilon=1e-5,
+ eos_token_id=0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.cutoffs = []
+ self.cutoffs.extend(cutoffs)
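+        # `tie_projs[i]` indicates whether the projection of adaptive-softmax cluster i is tied to the
+        # corresponding adaptive-embedding projection; the first cluster is never tied.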
+ if proj_share_all_but_first:
+ self.tie_projs = [False] + [True] * len(self.cutoffs)
+ else:
+ self.tie_projs = [False] + [False] * len(self.cutoffs)
+ self.d_model = d_model
+ self.d_embed = d_embed
+ self.d_head = d_head
+ self.d_inner = d_inner
+ self.div_val = div_val
+ self.pre_lnorm = pre_lnorm
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.mem_len = mem_len
+ self.same_length = same_length
+ self.attn_type = attn_type
+ self.clamp_len = clamp_len
+ self.sample_softmax = sample_softmax
+ self.adaptive = adaptive
+ self.dropout = dropout
+ self.dropatt = dropatt
+ self.untie_r = untie_r
+ self.init = init
+ self.init_range = init_range
+ self.proj_init_std = proj_init_std
+ self.init_std = init_std
+ self.layer_norm_epsilon = layer_norm_epsilon
+ super().__init__(eos_token_id=eos_token_id, **kwargs)
+
+ @property
+ def max_position_embeddings(self):
+ # Message copied from Transformer-XL documentation
+ logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
+ return -1
+
+ @max_position_embeddings.setter
+ def max_position_embeddings(self, value):
+ # Message copied from Transformer-XL documentation
+ raise NotImplementedError(
+ f"The model {self.model_type} is one of the few models that has no sequence length limit."
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2693ac333b84b08769eb15a13a26dcf1a547267
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,121 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Transformer XL checkpoint and datasets."""
+
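+# Illustrative invocation (the paths below are hypothetical placeholders):
+#
+#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
+#       --tf_checkpoint_path /path/to/tf_checkpoint \
+#       --transfo_xl_config_file /path/to/transfo_xl_config.json \
+#       --pytorch_dump_folder_path /path/to/pytorch_dump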
+
+import argparse
+import os
+import pickle
+import sys
+
+import torch
+
+from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
+from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils
+from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
+from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
+
+
+logging.set_verbosity_info()
+
+# We do this to be able to load python 2 datasets pickles
+# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
+data_utils.Vocab = data_utils.TransfoXLTokenizer
+data_utils.Corpus = data_utils.TransfoXLCorpus
+sys.modules["data_utils"] = data_utils
+sys.modules["vocabulary"] = data_utils
+
+
+def convert_transfo_xl_checkpoint_to_pytorch(
+ tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
+):
+ if transfo_xl_dataset_file:
+ # Convert a pre-processed corpus (see original TensorFlow repo)
+ with open(transfo_xl_dataset_file, "rb") as fp:
+ corpus = pickle.load(fp, encoding="latin1")
+        # Save the vocabulary and dataset cache as dictionaries (more robust than pickles for long-term storage)
+ pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
+ print(f"Save vocabulary to {pytorch_vocab_dump_path}")
+ corpus_vocab_dict = corpus.vocab.__dict__
+ torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
+
+ corpus_dict_no_vocab = corpus.__dict__
+ corpus_dict_no_vocab.pop("vocab", None)
+ pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
+ print(f"Save dataset to {pytorch_dataset_dump_path}")
+ torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
+
+ if tf_checkpoint_path:
+ # Convert a pre-trained TensorFlow model
+ config_path = os.path.abspath(transfo_xl_config_file)
+ tf_path = os.path.abspath(tf_checkpoint_path)
+
+ print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
+ # Initialise PyTorch model
+ if transfo_xl_config_file == "":
+ config = TransfoXLConfig()
+ else:
+ config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ model = TransfoXLLMHeadModel(config)
+
+ model = load_tf_weights_in_transfo_xl(model, config, tf_path)
+ # Save pytorch-model
+ pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
+ pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
+ print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
+ print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
+ f.write(config.to_json_string())
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the folder to store the PyTorch model or dataset/vocab.",
+ )
+ parser.add_argument(
+ "--tf_checkpoint_path",
+ default="",
+ type=str,
+ help="An optional path to a TensorFlow checkpoint path to be converted.",
+ )
+ parser.add_argument(
+ "--transfo_xl_config_file",
+ default="",
+ type=str,
+ help=(
+ "An optional config json file corresponding to the pre-trained BERT model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument(
+ "--transfo_xl_dataset_file",
+ default="",
+ type=str,
+ help="An optional dataset file to be converted in a vocabulary.",
+ )
+ args = parser.parse_args()
+ convert_transfo_xl_checkpoint_to_pytorch(
+ args.tf_checkpoint_path,
+ args.transfo_xl_config_file,
+ args.pytorch_dump_folder_path,
+ args.transfo_xl_dataset_file,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..27200a5d63f18b1d6457f1e303022f43b0d75d50
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
@@ -0,0 +1,1122 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ TF 2.0 Transformer XL model.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ....modeling_tf_utils import (
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ....tf_utils import shape_list, stable_softmax
+from ....utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_transfo_xl import TransfoXLConfig
+from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
+_CONFIG_FOR_DOC = "TransfoXLConfig"
+
+
+from .._archive_maps import TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TFPositionalEmbedding(keras.layers.Layer):
+ def __init__(self, demb, **kwargs):
+ super().__init__(**kwargs)
+
+ self.inv_freq = 1 / (10000 ** (tf.range(0, demb, 2.0) / demb))
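+        # inv_freq[i] = 1 / 10000^(2i / demb); in `call`, the outer product of the position sequence with
+        # inv_freq is passed through sin and cos and concatenated to give the usual sinusoidal embedding.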
+
+ def call(self, pos_seq, bsz=None):
+ self.inv_freq = tf.cast(self.inv_freq, dtype=pos_seq.dtype)
+ sinusoid_inp = tf.einsum("i,j->ij", pos_seq, self.inv_freq)
+ pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
+
+ if bsz is not None:
+ return tf.tile(pos_emb[:, None, :], [1, bsz, 1])
+ else:
+ return pos_emb[:, None, :]
+
+
+class TFPositionwiseFF(keras.layers.Layer):
+ def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs):
+ super().__init__(**kwargs)
+
+ self.d_model = d_model
+ self.d_inner = d_inner
+ self.dropout = dropout
+
+ self.layer_1 = keras.layers.Dense(
+ d_inner, kernel_initializer=get_initializer(init_std), activation=tf.nn.relu, name="CoreNet_._0"
+ )
+ self.drop_1 = keras.layers.Dropout(dropout)
+ self.layer_2 = keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), name="CoreNet_._3")
+ self.drop_2 = keras.layers.Dropout(dropout)
+
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
+
+ self.pre_lnorm = pre_lnorm
+
+ def call(self, inp, training=False):
+ if self.pre_lnorm:
+ # layer normalization + positionwise feed-forward
+ core_out = self.layer_norm(inp)
+ core_out = self.layer_1(core_out)
+ core_out = self.drop_1(core_out, training=training)
+ core_out = self.layer_2(core_out)
+ core_out = self.drop_2(core_out, training=training)
+
+ # residual connection
+ output = core_out + inp
+ else:
+ # positionwise feed-forward
+ core_out = self.layer_1(inp)
+ core_out = self.drop_1(core_out, training=training)
+ core_out = self.layer_2(core_out)
+ core_out = self.drop_2(core_out, training=training)
+
+ # residual connection + layer normalization
+ output = self.layer_norm(inp + core_out)
+
+ return output
+
+
+class TFRelPartialLearnableMultiHeadAttn(keras.layers.Layer):
+ def __init__(
+ self,
+ n_head,
+ d_model,
+ d_head,
+ dropout,
+ dropatt=0.0,
+ pre_lnorm=False,
+ r_r_bias=None,
+ r_w_bias=None,
+ layer_norm_epsilon=1e-5,
+ init_std=0.02,
+ output_attentions=False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.n_head = n_head
+ self.d_model = d_model
+ self.d_head = d_head
+ self.dropout = dropout
+ self.output_attentions = output_attentions
+
+ self.qkv_net = keras.layers.Dense(
+ 3 * n_head * d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="qkv_net"
+ )
+
+ self.drop = keras.layers.Dropout(dropout)
+ self.dropatt = keras.layers.Dropout(dropatt)
+ self.o_net = keras.layers.Dense(
+ d_model, kernel_initializer=get_initializer(init_std), use_bias=False, name="o_net"
+ )
+
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
+
+ self.scale = 1 / (d_head**0.5)
+
+ self.pre_lnorm = pre_lnorm
+
+ if r_r_bias is not None and r_w_bias is not None: # Biases are shared
+ self.r_r_bias = r_r_bias
+ self.r_w_bias = r_w_bias
+ else:
+ self.r_r_bias = None
+ self.r_w_bias = None
+
+ self.r_net = keras.layers.Dense(
+ self.n_head * self.d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="r_net"
+ )
+
+ def build(self, input_shape):
+ if self.r_r_bias is None or self.r_w_bias is None: # Biases are not shared
+ self.r_r_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
+ )
+ self.r_w_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
+ )
+ super().build(input_shape)
+
+ def _rel_shift(self, x):
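+        # Standard Transformer-XL relative-shift trick: pad one zero along the key axis, reshape so that
+        # each query row is shifted by a different offset, then trim, which realigns scores indexed by
+        # relative distance with the corresponding absolute key positions.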
+ x_size = shape_list(x)
+
+ x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
+ x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]])
+ x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
+ x = tf.reshape(x, x_size)
+
+ return x
+
+ def call(self, w, r, attn_mask, mems, head_mask, output_attentions, training=False):
+ qlen, rlen, bsz = shape_list(w)[0], shape_list(r)[0], shape_list(w)[1]
+
+ if mems is not None:
+ mems = tf.cast(mems, dtype=w.dtype)
+ cat = tf.concat([mems, w], 0)
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(cat))
+ else:
+ w_heads = self.qkv_net(cat)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
+ w_head_q = w_head_q[-qlen:]
+ else:
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(w))
+ else:
+ w_heads = self.qkv_net(w)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
+
+ klen = shape_list(w_head_k)[0]
+
+ w_head_q = tf.reshape(w_head_q, (qlen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head
+        w_head_k = tf.reshape(w_head_k, (klen, bsz, self.n_head, self.d_head))  # klen x bsz x n_head x d_head
+        w_head_v = tf.reshape(w_head_v, (klen, bsz, self.n_head, self.d_head))  # klen x bsz x n_head x d_head
+
+        r_head_k = tf.reshape(r_head_k, (rlen, self.n_head, self.d_head))  # rlen x n_head x d_head
+
+ # compute attention score
+ rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
+ AC = tf.einsum("ibnd,jbnd->ijbn", rw_head_q, w_head_k) # qlen x klen x bsz x n_head
+
+ rr_head_q = w_head_q + self.r_r_bias
+ BD = tf.einsum("ibnd,jnd->ijbn", rr_head_q, r_head_k) # qlen x klen x bsz x n_head
+ BD = self._rel_shift(BD)
+
+ # [qlen x klen x bsz x n_head]
+ attn_score = AC + BD
+ attn_score = attn_score * self.scale
+
+ # compute attention probability
+ if attn_mask is not None:
+ attn_mask_t = attn_mask[:, :, None, None]
+ attn_mask_t = tf.cast(attn_mask_t, dtype=attn_score.dtype)
+ attn_score = attn_score * (1.0 - attn_mask_t) - 1e30 * attn_mask_t
+
+ # [qlen x klen x bsz x n_head]
+ attn_prob = stable_softmax(attn_score, axis=1)
+ attn_prob = self.dropatt(attn_prob, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_prob = attn_prob * head_mask
+
+ # compute attention vector
+ attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, w_head_v)
+
+ # [qlen x bsz x n_head x d_head]
+ attn_vec_sizes = shape_list(attn_vec)
+ attn_vec = tf.reshape(attn_vec, (attn_vec_sizes[0], attn_vec_sizes[1], self.n_head * self.d_head))
+
+ # linear projection
+ attn_out = self.o_net(attn_vec)
+ attn_out = self.drop(attn_out, training=training)
+
+ if self.pre_lnorm:
+ # residual connection
+ outputs = [w + attn_out]
+ else:
+ # residual connection + layer normalization
+ outputs = [self.layer_norm(w + attn_out)]
+
+ if output_attentions:
+ outputs.append(attn_prob)
+
+ return outputs
+
+
+class TFRelPartialLearnableDecoderLayer(keras.layers.Layer):
+ def __init__(
+ self,
+ n_head,
+ d_model,
+ d_head,
+ d_inner,
+ dropout,
+ dropatt=0.0,
+ pre_lnorm=False,
+ r_w_bias=None,
+ r_r_bias=None,
+ layer_norm_epsilon=1e-5,
+ init_std=0.02,
+ output_attentions=False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.dec_attn = TFRelPartialLearnableMultiHeadAttn(
+ n_head,
+ d_model,
+ d_head,
+ dropout,
+ dropatt=dropatt,
+ pre_lnorm=pre_lnorm,
+ r_w_bias=r_w_bias,
+ r_r_bias=r_r_bias,
+ init_std=init_std,
+ layer_norm_epsilon=layer_norm_epsilon,
+ output_attentions=output_attentions,
+ name="dec_attn",
+ )
+ self.pos_ff = TFPositionwiseFF(
+ d_model,
+ d_inner,
+ dropout,
+ pre_lnorm=pre_lnorm,
+ init_std=init_std,
+ layer_norm_epsilon=layer_norm_epsilon,
+ name="pos_ff",
+ )
+
+ def call(self, dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=False):
+ attn_outputs = self.dec_attn(dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=training)
+ ff_output = self.pos_ff(attn_outputs[0], training=training)
+
+ outputs = [ff_output] + attn_outputs[1:]
+
+ return outputs
+
+
+class TFTransfoEmbeddings(keras.layers.Layer):
+ def __init__(self, vocab_size, emb_size, init_std, **kwargs):
+ super().__init__(**kwargs)
+
+ self.vocab_size = vocab_size
+ self.emb_size = emb_size
+ self.init_std = init_std
+
+ def build(self, input_shape):
+ self.weight = self.add_weight(
+ shape=(self.vocab_size, self.emb_size),
+ initializer=get_initializer(self.init_std),
+ name="embeddings",
+ )
+
+ super().build(input_shape)
+
+ def call(self, inputs):
+ return tf.gather(self.weight, inputs)
+
+
+class TFAdaptiveEmbedding(keras.layers.Layer):
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, init_std=0.02, sample_softmax=False, **kwargs):
+ super().__init__(**kwargs)
+
+ self.n_token = n_token
+ self.d_embed = d_embed
+ self.init_std = init_std
+
+ self.cutoffs = cutoffs + [n_token]
+ self.div_val = div_val
+ self.d_proj = d_proj
+
+ self.emb_scale = d_proj**0.5
+
+ self.cutoff_ends = [0] + self.cutoffs
+
+ self.emb_layers = []
+ self.emb_projs = []
+
+ if div_val == 1:
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = d_embed // (div_val**i)
+ self.emb_layers.append(
+ TFTransfoEmbeddings(
+ r_idx - l_idx,
+ d_emb_i,
+ init_std,
+ name=f"emb_layers_._{i}",
+ )
+ )
+
+ def build(self, input_shape):
+ for i in range(len(self.cutoffs)):
+ d_emb_i = self.d_embed // (self.div_val**i)
+ self.emb_projs.append(
+ self.add_weight(
+ shape=(d_emb_i, self.d_proj),
+ initializer=get_initializer(self.init_std),
+ trainable=True,
+ name=f"emb_projs_._{i}",
+ )
+ )
+
+ super().build(input_shape)
+
+ def call(self, inp):
+ if self.div_val == 1:
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+ else:
+ inp_flat = tf.reshape(inp, (-1,))
+ emb_flat = tf.zeros([shape_list(inp_flat)[0], self.d_proj])
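+            # Each cluster i covers ids in [cutoff_ends[i], cutoff_ends[i + 1]); its tokens are embedded
+            # with a smaller table of width d_embed // div_val**i, projected up to d_proj, and scattered
+            # back into their original positions.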
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+
+ mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
+
+ inp_i = tf.boolean_mask(inp_flat, mask_i) - l_idx
+ emb_i = self.emb_layers[i](inp_i)
+ emb_i = tf.einsum("id,de->ie", emb_i, self.emb_projs[i])
+
+ mask_idx = tf.where(mask_i)
+ scatter = tf.scatter_nd(mask_idx, emb_i, shape_list(emb_flat))
+ emb_flat = tf.cast(emb_flat, dtype=scatter.dtype)
+ emb_flat += scatter
+
+ embed_shape = shape_list(inp) + [self.d_proj]
+ embed = tf.reshape(emb_flat, embed_shape)
+
+ embed *= self.emb_scale
+
+ return embed
+
+
+@keras_serializable
+class TFTransfoXLMainLayer(keras.layers.Layer):
+ config_class = TransfoXLConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+ self.return_dict = config.use_return_dict
+
+ self.n_token = config.vocab_size
+
+ self.d_embed = config.d_embed
+ self.d_model = config.d_model
+ self.n_head = config.n_head
+ self.d_head = config.d_head
+ self.untie_r = config.untie_r
+
+ self.word_emb = TFAdaptiveEmbedding(
+ config.vocab_size,
+ config.d_embed,
+ config.d_model,
+ config.cutoffs,
+ div_val=config.div_val,
+ init_std=config.init_std,
+ name="word_emb",
+ )
+
+ self.drop = keras.layers.Dropout(config.dropout)
+
+ self.n_layer = config.n_layer
+ self.mem_len = config.mem_len
+ self.attn_type = config.attn_type
+
+ self.layers = []
+ if config.attn_type == 0: # the default attention
+ for i in range(config.n_layer):
+ self.layers.append(
+ TFRelPartialLearnableDecoderLayer(
+ config.n_head,
+ config.d_model,
+ config.d_head,
+ config.d_inner,
+ config.dropout,
+ dropatt=config.dropatt,
+ pre_lnorm=config.pre_lnorm,
+ r_w_bias=None if self.untie_r else self.r_w_bias,
+ r_r_bias=None if self.untie_r else self.r_r_bias,
+ layer_norm_epsilon=config.layer_norm_epsilon,
+ init_std=config.init_std,
+ output_attentions=self.output_attentions,
+ name=f"layers_._{i}",
+ )
+ )
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ self.same_length = config.same_length
+ self.clamp_len = config.clamp_len
+
+ if self.attn_type == 0: # default attention
+ self.pos_emb = TFPositionalEmbedding(self.d_model, name="pos_emb")
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ def build(self, input_shape):
+ if not self.untie_r:
+ self.r_w_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
+ )
+ self.r_r_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
+ )
+ super().build(input_shape)
+
+ def get_input_embeddings(self):
+ return self.word_emb
+
+ def set_input_embeddings(self, value):
+ raise NotImplementedError
+
+ def backward_compatible(self):
+ self.sample_softmax = -1
+
+ def reset_memory_length(self, mem_len):
+ self.mem_len = mem_len
+
+ def _prune_heads(self, heads):
+ raise NotImplementedError
+
+ def init_mems(self, bsz):
+ if self.mem_len > 0:
+ mems = []
+ for i in range(self.n_layer):
+ empty = tf.zeros([self.mem_len, bsz, self.d_model])
+ mems.append(empty)
+
+ return mems
+ else:
+ return None
+
+ def _update_mems(self, hids, mems, mlen, qlen):
+ # does not deal with None
+ if mems is None:
+ return None
+
+ # mems is not None
+ assert len(hids) == len(mems), "len(hids) != len(mems)"
+
+ # There are `mlen + qlen` steps that can be cached into mems
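+        # e.g. with mem_len=1600, mlen=1600 and qlen=128, `cat` below has length 1728 per layer and the
+        # new memory keeps cat[128:1728], i.e. the most recent `mem_len` hidden states.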
+ new_mems = []
+ end_idx = mlen + tf.math.maximum(0, qlen)
+ beg_idx = tf.math.maximum(0, end_idx - tf.convert_to_tensor(self.mem_len))
+ for i in range(len(hids)):
+ mems[i] = tf.cast(mems[i], dtype=hids[i].dtype)
+ cat = tf.concat([mems[i], hids[i]], axis=0)
+            # Detach the concatenated states from the graph so that no gradients flow into the cached memory
+            cat = tf.stop_gradient(cat)
+ new_mems.append(cat[beg_idx:end_idx])
+
+ return new_mems
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ):
+ # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
+ # so we transpose here from shape [bsz, len] to shape [len, bsz]
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_ids = tf.transpose(input_ids, perm=(1, 0))
+ qlen, bsz = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))
+ qlen, bsz = shape_list(inputs_embeds)[:2]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if mems is None:
+ mems = self.init_mems(bsz)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.n_layer
+
+ if inputs_embeds is not None:
+ word_emb = inputs_embeds
+ else:
+ word_emb = self.word_emb(input_ids)
+
+ mlen = shape_list(mems[0])[0] if mems is not None else 0
+ klen = mlen + qlen
+
+ # Compute decoder attention mask
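+        # In `dec_attn_mask`, 1 means the position is masked. `upper_mask` hides keys that lie after the
+        # query position (future tokens); with `same_length`, `lower_mask` additionally hides keys too far
+        # in the past so that every query attends to the same number of positions.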
+ all_ones = tf.ones([qlen, klen], dtype=tf.int32)
+ upper_mask = 1 - tf.linalg.band_part(tf.ones([qlen, klen], dtype=tf.int32), -1, mlen)
+ if self.same_length:
+ mask_len = klen - self.mem_len
+ mask_shift_len = qlen - tf.nn.relu(mask_len) # Lazy clamping of negatives to zero
+
+ # Use an indicator variable instead of a conditional to keep the compiler happy
+ lower_mask = tf.linalg.band_part(all_ones, -1, 0) - (
+ tf.linalg.band_part(all_ones, mask_shift_len - 1, 0) * tf.cast(mask_shift_len != 0, tf.int32)
+ )
+ dec_attn_mask = upper_mask + lower_mask
+ else:
+ dec_attn_mask = upper_mask
+
+ hids = []
+ attentions = [] if output_attentions else None
+ if self.attn_type == 0: # default
+ pos_seq = tf.range(klen - 1, -1, -1.0)
+ if self.clamp_len > 0:
+ pos_seq = tf.minimum(pos_seq, self.clamp_len)
+ pos_emb = self.pos_emb(pos_seq)
+
+ core_out = self.drop(word_emb, training=training)
+ pos_emb = self.drop(pos_emb, training=training)
+
+ for i, layer in enumerate(self.layers):
+ hids.append(core_out)
+ mems_i = None if mems is None else mems[i]
+ layer_outputs = layer(
+ core_out,
+ pos_emb,
+ dec_attn_mask,
+ mems_i,
+ head_mask[i],
+ output_attentions,
+ training=training,
+ )
+ core_out = layer_outputs[0]
+ if output_attentions:
+ attentions.append(layer_outputs[1])
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ core_out = self.drop(core_out, training=training)
+
+ new_mems = self._update_mems(hids, mems, mlen, qlen)
+
+ # We transpose back here to shape [bsz, len, hidden_dim]
+ core_out = tf.transpose(core_out, perm=(1, 0, 2))
+
+ if output_hidden_states:
+ # Transpose to library standard shape [bsz, len, hidden_dim] and add last layer
+ hids = tuple(tf.transpose(t, perm=(1, 0, 2)) for t in hids)
+ hids = hids + (core_out,)
+ else:
+ hids = None
+ if output_attentions:
+ # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
+ attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)
+
+ if not return_dict:
+ return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)
+
+ return TFTransfoXLModelOutput(
+ last_hidden_state=core_out,
+ mems=new_mems,
+ hidden_states=hids,
+ attentions=attentions,
+ )
+
+
+class TFTransfoXLPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TransfoXLConfig
+ base_model_prefix = "transformer"
+
+
+@dataclass
+class TFTransfoXLModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: tf.Tensor = None
+ mems: List[tf.Tensor] = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+@dataclass
+class TFTransfoXLLMHeadModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ losses (`tf.Tensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):
+ Language modeling losses (not reduced).
+ prediction_scores (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ prediction_scores: tf.Tensor = None
+ mems: List[tf.Tensor] = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+@dataclass
+class TFTransfoXLSequenceClassifierOutputWithPast(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ mems: List[tf.Tensor] = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+TRANSFO_XL_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TRANSFO_XL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
+ given to this model should not be passed as `input_ids` as they have already been computed.
+ head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTransfoXLModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ return_dict: bool | None = None,
+ training: bool = False,
+ ) -> TFTransfoXLModelOutput | Tuple[tf.Tensor]:
+ outputs = self.transformer(
+ input_ids=input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+
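+def _tf_mems_reuse_sketch(model, first_chunk_ids, next_chunk_ids):
+    # Illustrative sketch (documentation only), assuming `model` is a `TFTransfoXLModel` and the two
+    # arguments are consecutive chunks of token ids of shape `(batch_size, chunk_length)`: the `mems`
+    # returned for one chunk are fed back in to speed up the forward pass on the next chunk, as
+    # described in the inputs docstring above.
+    outputs = model(first_chunk_ids)
+    mems = outputs.mems  # cached hidden states, one tensor per layer
+    # Tokens already covered by `mems` must not be passed again in `input_ids`.
+    next_outputs = model(next_chunk_ids, mems=mems)
+    return next_outputs
+
+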
+@add_start_docstrings(
+ """
+ The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
+ input embeddings)
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
+ self.sample_softmax = config.sample_softmax
+ assert self.sample_softmax <= 0, (
+ "Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
+ " https://github.com/huggingface/transformers/issues/3310"
+ )
+
+ self.crit = TFAdaptiveSoftmaxMask(
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, name="crit"
+ )
+
+ def _resize_token_embeddings(self, new_num_tokens):
+ raise NotImplementedError()
+
+ def get_output_embeddings(self):
+ """Double-check if you are using adaptive softmax."""
+ if len(self.crit.out_layers) > 0:
+ return self.crit.out_layers[-1]
+ return None
+
+ def reset_memory_length(self, mem_len):
+ self.transformer.reset_memory_length(mem_len)
+
+ def init_mems(self, bsz):
+ return self.transformer.init_mems(bsz)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTransfoXLLMHeadModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ return_dict: bool | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> TFTransfoXLLMHeadModelOutput | Tuple[tf.Tensor]:
+ if input_ids is not None:
+ bsz, tgt_len = shape_list(input_ids)[:2]
+ else:
+ bsz, tgt_len = shape_list(inputs_embeds)[:2]
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ mems,
+ head_mask,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=training,
+ )
+
+ last_hidden = transformer_outputs[0]
+ pred_hid = last_hidden[:, -tgt_len:]
+
+ softmax_output = self.crit(pred_hid, labels, training=training)
+ prediction_scores = softmax_output if labels is None else ()
+
+ if not return_dict:
+ return (prediction_scores,) + transformer_outputs[1:]
+
+ return TFTransfoXLLMHeadModelOutput(
+ prediction_scores=prediction_scores,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
+ inputs = {}
+
+        # if past is defined in model kwargs then use it for faster decoding
+        if past_key_values:
+            inputs["mems"] = past_key_values
+            inputs["input_ids"] = tf.expand_dims(input_ids[:, -1], axis=-1)
+        else:
+            inputs["input_ids"] = input_ids
+
+ return inputs
+
+ # Adapted from the torch tie_weights function
+ def tf_to_pt_weight_rename(self, tf_weight):
+ if self.config.tie_word_embeddings and "crit.out_layers" in tf_weight:
+ return tf_weight, tf_weight.replace("crit.out_layers", "transformer.word_emb.emb_layers")
+ elif self.config.tie_projs and "crit.out_projs" in tf_weight:
+ for i, tie_proj in enumerate(self.config.tie_projs):
+ if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
+ # self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
+ return tf_weight, tf_weight.replace(f"crit.out_projs.{i}", "transformer.word_emb.emb_projs.0")
+ elif tie_proj and self.config.div_val != 1:
+ # self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
+ return tf_weight, tf_weight.replace("crit.out_projs", "transformer.word_emb.emb_projs")
+ else:
+ return (tf_weight,)
+
+
+@add_start_docstrings(
+ """
+    The Transformer-XL Model transformer with a sequence classification head on top (linear layer).
+
+    [`TFTransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
+    models (e.g. GPT-1, GPT-2) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+ self.score = keras.layers.Dense(
+ config.num_labels,
+ kernel_initializer=get_initializer(config.init_range),
+ name="score",
+ use_bias=False,
+ )
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
+
+ def get_output_embeddings(self):
+ # Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too.
+ logger.warning(
+ "Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed "
+ "in transformers v4.32."
+ )
+ return self.transformer.word_emb
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTransfoXLSequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFTransfoXLSequenceClassifierOutputWithPast]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
+ config.vocab_size - 1]`.
+ """
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+ in_logits = None
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ sequence_lengths = (
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
+ - 1
+ )
+ sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+ loss = None
+
+ if labels is not None:
+ if input_ids is not None:
+ batch_size, sequence_length = shape_list(input_ids)[:2]
+ else:
+ batch_size, sequence_length = shape_list(inputs_embeds)[:2]
+ assert (
+ self.config.pad_token_id is not None or batch_size == 1
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
+
+ if not tf.is_tensor(sequence_lengths):
+ in_logits = logits[0:batch_size, sequence_lengths]
+
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
+
+ pooled_logits = in_logits if in_logits is not None else logits
+
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTransfoXLSequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed1488d5595cb8f36eb540992fb4ca46534a60fb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py
@@ -0,0 +1,179 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ A TF 2.0 Adaptive Softmax for Transformer XL model.
+"""
+
+
+import tensorflow as tf
+
+from ....modeling_tf_utils import keras
+from ....tf_utils import shape_list
+
+
+class TFAdaptiveSoftmaxMask(keras.layers.Layer):
+ def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
+ super().__init__(**kwargs)
+
+ self.vocab_size = vocab_size
+ self.d_embed = d_embed
+ self.d_proj = d_proj
+
+ self.cutoffs = cutoffs + [vocab_size]
+ self.cutoff_ends = [0] + self.cutoffs
+ self.div_val = div_val
+
+ self.shortlist_size = self.cutoffs[0]
+ self.n_clusters = len(self.cutoffs) - 1
+ self.head_size = self.shortlist_size + self.n_clusters
+ self.keep_order = keep_order
+
+ self.out_layers = []
+ self.out_projs = []
+
+ def build(self, input_shape):
+ if self.n_clusters > 0:
+ self.cluster_weight = self.add_weight(
+ shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
+ )
+ self.cluster_bias = self.add_weight(
+ shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
+ )
+
+ if self.div_val == 1:
+ for i in range(len(self.cutoffs)):
+ if self.d_proj != self.d_embed:
+ weight = self.add_weight(
+ shape=(self.d_embed, self.d_proj),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_projs_._{i}",
+ )
+ self.out_projs.append(weight)
+ else:
+ self.out_projs.append(None)
+ weight = self.add_weight(
+ shape=(self.vocab_size, self.d_embed),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._weight",
+ )
+ bias = self.add_weight(
+ shape=(self.vocab_size,),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._bias",
+ )
+ self.out_layers.append((weight, bias))
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = self.d_embed // (self.div_val**i)
+
+ weight = self.add_weight(
+ shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
+ )
+ self.out_projs.append(weight)
+ weight = self.add_weight(
+ shape=(r_idx - l_idx, d_emb_i),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._weight",
+ )
+ bias = self.add_weight(
+ shape=(r_idx - l_idx,),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._bias",
+ )
+ self.out_layers.append((weight, bias))
+ super().build(input_shape)
+
+ @staticmethod
+ def _logit(x, W, b, proj=None):
+ y = x
+ if proj is not None:
+ y = tf.einsum("ibd,ed->ibe", y, proj)
+ return tf.einsum("ibd,nd->ibn", y, W) + b
+
+ @staticmethod
+ def _gather_logprob(logprob, target):
+ lp_size = shape_list(logprob)
+ r = tf.range(lp_size[0], dtype=target.dtype)
+ idx = tf.stack([r, target], 1)
+ return tf.gather_nd(logprob, idx)
+
+ def call(self, hidden, target, return_mean=True, training=False):
+ head_logprob = 0
+ if self.n_clusters == 0:
+ output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
+ if target is not None:
+ loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
+ out = tf.nn.log_softmax(output, axis=-1)
+ else:
+ hidden_sizes = shape_list(hidden)
+ out = []
+ loss = tf.zeros(hidden_sizes[:2])
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ if target is not None:
+ mask = (target >= l_idx) & (target < r_idx)
+ mask_idx = tf.where(mask)
+ cur_target = tf.boolean_mask(target, mask) - l_idx
+
+ if self.div_val == 1:
+ cur_W = self.out_layers[0][0][l_idx:r_idx]
+ cur_b = self.out_layers[0][1][l_idx:r_idx]
+ else:
+ cur_W = self.out_layers[i][0]
+ cur_b = self.out_layers[i][1]
+
+ if i == 0:
+ cur_W = tf.concat([cur_W, self.cluster_weight], 0)
+ cur_b = tf.concat([cur_b, self.cluster_bias], 0)
+
+ head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
+ head_logprob = tf.nn.log_softmax(head_logit)
+ out.append(head_logprob[..., : self.cutoffs[0]])
+ if target is not None:
+ cur_head_logprob = tf.boolean_mask(head_logprob, mask)
+ cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
+ else:
+ tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
+ tail_logprob = tf.nn.log_softmax(tail_logit)
+ cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
+ logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
+ out.append(logprob_i)
+ if target is not None:
+ cur_head_logprob = tf.boolean_mask(head_logprob, mask)
+ cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
+ cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
+ cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
+ if target is not None:
+ loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
+ out = tf.concat(out, axis=-1)
+
+ if target is not None:
+ if return_mean:
+ loss = tf.reduce_mean(loss)
+ # Add the training-time loss value to the layer using `self.add_loss()`.
+ self.add_loss(loss)
+
+ # Log the loss as a metric (we could log arbitrary metrics,
+            # including different metrics for training and inference).
+ self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
+
+ return out
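+
+
+def _cutoff_partition_sketch(vocab_size, cutoffs):
+    # Illustrative sketch (documentation only): how the adaptive softmax above splits the vocabulary
+    # into a frequent-token "head" shortlist plus progressively larger tail clusters. `cutoffs` follows
+    # the same convention as `TFAdaptiveSoftmaxMask`; e.g. the wt103 checkpoint is assumed to use
+    # cutoffs=[20000, 40000, 200000] with vocab_size=267735.
+    cutoff_ends = [0] + list(cutoffs) + [vocab_size]
+    # Index 0 is the head shortlist handled directly by the head softmax; each later
+    # (l_idx, r_idx) range is a tail cluster reached through its cluster logit.
+    return [(cutoff_ends[i], cutoff_ends[i + 1]) for i in range(len(cutoff_ends) - 1)]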
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..897a3899c74cbd84713d96e3dad90cce2411dd17
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
@@ -0,0 +1,1295 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ PyTorch Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. In particular
+ https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
+"""
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ....modeling_utils import PreTrainedModel
+from ....utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_transfo_xl import TransfoXLConfig
+from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
+_CONFIG_FOR_DOC = "TransfoXLConfig"
+
+
+from .._archive_maps import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def build_tf_to_pytorch_map(model, config):
+ """
+ A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original
+ PyTorch model as possible.
+ """
+ tf_to_pt_map = {}
+
+ if hasattr(model, "transformer"):
+ # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
+ tf_to_pt_map.update(
+ {
+ "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
+ "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
+ }
+ )
+ for i, (out_l, proj_l, tie_proj) in enumerate(
+ zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
+ ):
+ layer_str = f"transformer/adaptive_softmax/cutoff_{i}/"
+ if config.tie_word_embeddings:
+ tf_to_pt_map.update({layer_str + "b": out_l.bias})
+ else:
+ raise NotImplementedError
+ # I don't think this is implemented in the TF code
+ tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
+ if not tie_proj:
+ tf_to_pt_map.update({layer_str + "proj": proj_l})
+ # Now load the rest of the transformer
+ model = model.transformer
+
+ # Embeddings
+ for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
+ layer_str = f"transformer/adaptive_embed/cutoff_{i}/"
+ tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})
+
+ # Transformer blocks
+ for i, b in enumerate(model.layers):
+ layer_str = f"transformer/layer_{i}/"
+ tf_to_pt_map.update(
+ {
+ layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
+ layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
+ layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
+ layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
+ layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
+ layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
+ layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
+ layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
+ layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
+ layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
+ layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
+ }
+ )
+
+ # Relative positioning biases
+ if config.untie_r:
+ r_r_list = []
+ r_w_list = []
+ for b in model.layers:
+ r_r_list.append(b.dec_attn.r_r_bias)
+ r_w_list.append(b.dec_attn.r_w_bias)
+ else:
+ r_r_list = [model.r_r_bias]
+ r_w_list = [model.r_w_bias]
+ tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
+ return tf_to_pt_map
+
+
+def load_tf_weights_in_transfo_xl(model, config, tf_path):
+ """Load tf checkpoints in a pytorch model"""
+ try:
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ # Build TF to PyTorch weights loading map
+ tf_to_pt_map = build_tf_to_pytorch_map(model, config)
+
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ tf_weights = {}
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ tf_weights[name] = array
+
+ for name, pointer in tf_to_pt_map.items():
+ assert name in tf_weights
+ array = tf_weights[name]
+        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
+ # which are not required for using pretrained model
+ if "kernel" in name or "proj" in name:
+ array = np.transpose(array)
+ if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
+ # Here we will split the TF weights
+ assert len(pointer) == array.shape[0]
+ for i, p_i in enumerate(pointer):
+ arr_i = array[i, ...]
+ try:
+ assert p_i.shape == arr_i.shape
+ except AssertionError as e:
+ e.args += (p_i.shape, arr_i.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name} for layer {i}")
+ p_i.data = torch.from_numpy(arr_i)
+ else:
+ try:
+ assert (
+ pointer.shape == array.shape
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
+ except AssertionError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ tf_weights.pop(name, None)
+ tf_weights.pop(name + "/Adam", None)
+ tf_weights.pop(name + "/Adam_1", None)
+
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
+ return model
+
+
+class PositionalEmbedding(nn.Module):
+ def __init__(self, demb):
+ super().__init__()
+
+ self.demb = demb
+
+ inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
+ self.register_buffer("inv_freq", inv_freq)
+
+ def forward(self, pos_seq, bsz=None):
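+        # Standard sinusoidal encoding evaluated on the (relative) positions in `pos_seq`:
+        # with inv_freq_i = 1 / 10000^(2i / demb), the first half of each embedding is
+        # sin(pos * inv_freq) and the second half is cos(pos * inv_freq).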
+ sinusoid_inp = torch.outer(pos_seq, self.inv_freq)
+ pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
+
+ if bsz is not None:
+ return pos_emb[:, None, :].expand(-1, bsz, -1)
+ else:
+ return pos_emb[:, None, :]
+
+
+class PositionwiseFF(nn.Module):
+ def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
+ super().__init__()
+
+ self.d_model = d_model
+ self.d_inner = d_inner
+ self.dropout = dropout
+
+ self.CoreNet = nn.Sequential(
+ nn.Linear(d_model, d_inner),
+ nn.ReLU(inplace=True),
+ nn.Dropout(dropout),
+ nn.Linear(d_inner, d_model),
+ nn.Dropout(dropout),
+ )
+
+ self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
+
+ self.pre_lnorm = pre_lnorm
+
+ def forward(self, inp):
+ if self.pre_lnorm:
+ # layer normalization + positionwise feed-forward
+ core_out = self.CoreNet(self.layer_norm(inp))
+
+ # residual connection
+ output = core_out + inp
+ else:
+ # positionwise feed-forward
+ core_out = self.CoreNet(inp)
+
+ # residual connection + layer normalization
+ output = self.layer_norm(inp + core_out)
+
+ return output
+
+
+class RelPartialLearnableMultiHeadAttn(nn.Module):
+ def __init__(
+ self,
+ n_head,
+ d_model,
+ d_head,
+ dropout,
+ dropatt=0,
+ pre_lnorm=False,
+ r_r_bias=None,
+ r_w_bias=None,
+ layer_norm_epsilon=1e-5,
+ ):
+ super().__init__()
+
+ self.n_head = n_head
+ self.d_model = d_model
+ self.d_head = d_head
+ self.dropout = dropout
+
+ self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
+
+ self.drop = nn.Dropout(dropout)
+ self.dropatt = nn.Dropout(dropatt)
+ self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
+
+ self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
+
+ self.scale = 1 / (d_head**0.5)
+
+ self.pre_lnorm = pre_lnorm
+
+ if r_r_bias is None or r_w_bias is None: # Biases are not shared
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ else:
+ self.r_r_bias = r_r_bias
+ self.r_w_bias = r_w_bias
+
+ self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
+
+ def _rel_shift(self, x):
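+        # "Relative shift" trick from Transformer-XL: pad a column of zeros along the key axis,
+        # reshape so that rows become offset by one position relative to each other, then drop
+        # the first row. This realigns scores indexed by relative distance into scores indexed
+        # by key position without materialising an explicit (qlen, klen) relative-position
+        # lookup per head.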
+ zero_pad_shape = (x.size(0), 1) + x.size()[2:]
+ zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
+ x_padded = torch.cat([zero_pad, x], dim=1)
+
+ x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
+ x_padded = x_padded.view(*x_padded_shape)
+
+ x = x_padded[1:].view_as(x)
+
+ return x
+
+ def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False):
+ qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
+
+ if mems is not None:
+ cat = torch.cat([mems, w], 0)
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(cat))
+ else:
+ w_heads = self.qkv_net(cat)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
+ w_head_q = w_head_q[-qlen:]
+ else:
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(w))
+ else:
+ w_heads = self.qkv_net(w)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
+
+ klen = w_head_k.size(0)
+
+ w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
+        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
+        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
+
+        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # rlen x n_head x d_head
+
+ # compute attention score
+ rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
+ AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
+
+ rr_head_q = w_head_q + self.r_r_bias
+ BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
+ BD = self._rel_shift(BD)
+
+ # [qlen x klen x bsz x n_head]
+ attn_score = AC + BD
+ attn_score.mul_(self.scale)
+
+ mask_value = torch.finfo(attn_score.dtype).min
+
+ # compute attention probability
+ if attn_mask is not None and torch.sum(attn_mask).item():
+ attn_mask = attn_mask == 1 # Switch to bool
+ if attn_mask.dim() == 2:
+ attn_score = (
+ attn_score.float().masked_fill(attn_mask[None, :, :, None], mask_value).type_as(attn_score)
+ )
+ elif attn_mask.dim() == 3:
+ attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], mask_value).type_as(attn_score)
+
+ # [qlen x klen x bsz x n_head]
+ attn_prob = nn.functional.softmax(attn_score, dim=1)
+ attn_prob = self.dropatt(attn_prob)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_prob = attn_prob * head_mask
+
+ # compute attention vector
+ attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
+
+ # [qlen x bsz x n_head x d_head]
+ attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
+
+ # linear projection
+ attn_out = self.o_net(attn_vec)
+ attn_out = self.drop(attn_out)
+
+ if self.pre_lnorm:
+ # residual connection
+ outputs = [w + attn_out]
+ else:
+ # residual connection + layer normalization
+ outputs = [self.layer_norm(w + attn_out)]
+
+ if output_attentions:
+ outputs.append(attn_prob)
+
+ return outputs
+
+
+class RelPartialLearnableDecoderLayer(nn.Module):
+ def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):
+ super().__init__()
+
+ self.dec_attn = RelPartialLearnableMultiHeadAttn(
+ n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs
+ )
+ self.pos_ff = PositionwiseFF(
+ d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon
+ )
+
+ def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False):
+ attn_outputs = self.dec_attn(
+ dec_inp,
+ r,
+ attn_mask=dec_attn_mask,
+ mems=mems,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ ff_output = self.pos_ff(attn_outputs[0])
+
+ outputs = [ff_output] + attn_outputs[1:]
+
+ return outputs
+
+
+class AdaptiveEmbedding(nn.Module):
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):
+ super().__init__()
+
+ self.n_token = n_token
+ self.d_embed = d_embed
+
+ self.cutoffs = cutoffs + [n_token]
+ self.div_val = div_val
+ self.d_proj = d_proj
+
+ self.emb_scale = d_proj**0.5
+
+ self.cutoff_ends = [0] + self.cutoffs
+
+ self.emb_layers = nn.ModuleList()
+ self.emb_projs = nn.ParameterList()
+ if div_val == 1:
+ self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
+ if d_proj != d_embed:
+ self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = d_embed // (div_val**i)
+ self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
+ self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
+
+ def forward(self, inp):
+ if self.div_val == 1:
+ embed = self.emb_layers[0](inp)
+ if self.d_proj != self.d_embed:
+ embed = nn.functional.linear(embed, self.emb_projs[0])
+ else:
+ param = next(self.parameters())
+ inp_flat = inp.view(-1)
+ emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+
+ mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
+ indices_i = mask_i.nonzero().squeeze()
+
+ if indices_i.numel() == 0:
+ continue
+
+ inp_i = inp_flat.index_select(0, indices_i) - l_idx
+ emb_i = self.emb_layers[i](inp_i)
+ emb_i = nn.functional.linear(emb_i, self.emb_projs[i])
+
+ emb_flat.index_copy_(0, indices_i, emb_i)
+
+ embed_shape = inp.size() + (self.d_proj,)
+ embed = emb_flat.view(embed_shape)
+
+ embed.mul_(self.emb_scale)
+
+ return embed
+
+
+class TransfoXLPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TransfoXLConfig
+ load_tf_weights = load_tf_weights_in_transfo_xl
+ base_model_prefix = "transformer"
+
+ def _init_weight(self, weight):
+ if self.config.init == "uniform":
+ nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
+ elif self.config.init == "normal":
+ nn.init.normal_(weight, 0.0, self.config.init_std)
+
+ def _init_bias(self, bias):
+ nn.init.constant_(bias, 0.0)
+
+ def _init_weights(self, m):
+ """Initialize the weights."""
+ classname = m.__class__.__name__
+ if classname.find("Linear") != -1:
+ if hasattr(m, "weight") and m.weight is not None:
+ self._init_weight(m.weight)
+ if hasattr(m, "bias") and m.bias is not None:
+ self._init_bias(m.bias)
+ elif classname.find("AdaptiveEmbedding") != -1:
+ if hasattr(m, "emb_projs"):
+ for i in range(len(m.emb_projs)):
+ if m.emb_projs[i] is not None:
+ nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
+ elif classname.find("Embedding") != -1:
+ if hasattr(m, "weight"):
+ self._init_weight(m.weight)
+ elif classname.find("ProjectedAdaptiveLogSoftmax") != -1:
+ if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
+ self._init_weight(m.cluster_weight)
+ if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
+ self._init_bias(m.cluster_bias)
+ if hasattr(m, "out_projs"):
+ for i in range(len(m.out_projs)):
+ if m.out_projs[i] is not None:
+ nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
+ elif classname.find("LayerNorm") != -1:
+ if hasattr(m, "weight"):
+ nn.init.normal_(m.weight, 1.0, self.config.init_std)
+ if hasattr(m, "bias") and m.bias is not None:
+ self._init_bias(m.bias)
+ else:
+ if hasattr(m, "r_emb"):
+ self._init_weight(m.r_emb)
+ if hasattr(m, "r_w_bias"):
+ self._init_weight(m.r_w_bias)
+ if hasattr(m, "r_r_bias"):
+ self._init_weight(m.r_r_bias)
+ if hasattr(m, "r_bias"):
+ self._init_bias(m.r_bias)
+
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1):
+ """
+ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying
+ weights embeddings afterwards if the model class has a *tie_weights()* method.
+
+ Arguments:
+ new_num_tokens: (*optional*) int:
+ New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at
+ the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and
+ just returns a pointer to the input tokens `torch.nn.Embeddings` Module of the model.
+ layer: (*optional*) int:
+ Layer of the *AdaptiveEmbedding* where the resizing should be done. Per default the last layer will be
+ resized. Be aware that when resizing other than the last layer, you have to ensure that the new
+ token(s) in the tokenizer are at the corresponding position.
+
+ Return: `torch.nn.Embeddings` Pointer to the input tokens Embeddings Module of the model
+ """
+ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
+
+ if new_num_tokens is None:
+ return self.get_input_embeddings()
+
+ new_num_tokens_layer, layer = self._get_new_num_tokens_layer(new_num_tokens, layer)
+ assert new_num_tokens_layer > 0, "The size of the new embedding layer cannot be 0 or less"
+ model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer)
+
+ # Update base model and current model config
+ self.config.vocab_size = new_num_tokens
+ base_model.vocab_size = new_num_tokens
+ base_model.n_token = new_num_tokens
+
+ new_embedding_shapes = self._get_embedding_shapes()
+ self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer)
+
+ # Tie weights again if needed
+ self.tie_weights()
+
+ return model_embeds
+
+ def _get_new_num_tokens_layer(self, new_num_tokens, layer):
+ embeddings = self.get_input_embeddings()
+ if layer == -1:
+ layer = len(embeddings.emb_layers) - 1
+ assert 0 <= layer <= len(embeddings.emb_layers) - 1
+
+ new_num_tokens_layer = (
+ new_num_tokens
+ - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]])
+ - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]])
+ )
+ return new_num_tokens_layer, layer
+
+ def _get_embedding_shapes(self):
+ embeddings = self.get_input_embeddings()
+ return [emb.weight.shape[0] for emb in embeddings.emb_layers]
+
+ def _resize_token_embeddings(self, new_num_tokens, layer=-1):
+ embeddings = self.get_input_embeddings()
+ if new_num_tokens is None:
+ return embeddings
+ new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens)
+ embeddings.emb_layers[layer] = new_embeddings_layer
+
+ self.set_input_embeddings(embeddings)
+
+ return self.get_input_embeddings()
+
+ def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
+ embeddings = self.get_input_embeddings()
+
+ for i in range(layer, len(embeddings.cutoffs)):
+ embeddings.cutoffs[i] = sum(new_embedding_shapes[: i + 1])
+
+ embeddings.cutoff_ends = [0] + embeddings.cutoffs
+ embeddings.n_token = new_num_tokens
+
+ self.config.cutoffs = embeddings.cutoffs[:-1]
+
+ return embeddings.cutoffs
+
+
+@dataclass
+class TransfoXLModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ mems: List[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class TransfoXLSequenceClassifierOutputWithPast(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ mems: List[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class TransfoXLLMHeadModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ losses (`torch.FloatTensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):
+ Language modeling losses (not reduced).
+ prediction_scores (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+        loss (`torch.FloatTensor` of shape `()`, *optional*, returned when `labels` is provided):
+ Reduced language modeling loss.
+ """
+
+ losses: Optional[torch.FloatTensor] = None
+ prediction_scores: torch.FloatTensor = None
+ mems: List[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ loss: Optional[torch.FloatTensor] = None
+
+ @property
+ def logits(self):
+ # prediction scores are the output of the adaptive softmax, see
+ # the file `modeling_transfo_xl_utilities`. Since the adaptive
+ # softmax returns the log softmax value, `self.prediction_scores`
+ # are strictly speaking not exactly `logits`, but behave the same
+ # way logits do.
+ return self.prediction_scores
+
+
+TRANSFO_XL_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TRANSFO_XL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
+ given to this model should not be passed as `input_ids` as they have already been computed.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TransfoXLModel(TransfoXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.n_token = config.vocab_size
+
+ self.d_embed = config.d_embed
+ self.d_model = config.d_model
+ self.n_head = config.n_head
+ self.d_head = config.d_head
+
+ self.word_emb = AdaptiveEmbedding(
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
+ )
+
+ self.drop = nn.Dropout(config.dropout)
+
+ self.n_layer = config.n_layer
+ self.mem_len = config.mem_len
+ self.attn_type = config.attn_type
+
+ if not config.untie_r:
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+
+ self.layers = nn.ModuleList()
+ if config.attn_type == 0: # the default attention
+ for i in range(config.n_layer):
+ self.layers.append(
+ RelPartialLearnableDecoderLayer(
+ config.n_head,
+ config.d_model,
+ config.d_head,
+ config.d_inner,
+ config.dropout,
+ dropatt=config.dropatt,
+ pre_lnorm=config.pre_lnorm,
+ r_w_bias=None if config.untie_r else self.r_w_bias,
+ r_r_bias=None if config.untie_r else self.r_r_bias,
+ layer_norm_epsilon=config.layer_norm_epsilon,
+ )
+ )
+ else: # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
+ raise NotImplementedError # Removed them to avoid maintaining dead code
+
+ self.same_length = config.same_length
+ self.clamp_len = config.clamp_len
+
+ if self.attn_type == 0: # default attention
+ self.pos_emb = PositionalEmbedding(self.d_model)
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_emb
+
+ def set_input_embeddings(self, new_embeddings):
+ self.word_emb = new_embeddings
+
+ def backward_compatible(self):
+ self.sample_softmax = -1
+
+ def reset_memory_length(self, mem_len):
+ self.mem_len = mem_len
+
+ def _prune_heads(self, heads):
+ logger.info("Head pruning is not implemented for Transformer-XL model")
+ pass
+
+ def init_mems(self, bsz):
+ if self.mem_len > 0:
+ mems = []
+ param = next(self.parameters())
+ for i in range(self.n_layer):
+ empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)
+ mems.append(empty)
+
+ return mems
+ else:
+ return None
+
+ def _update_mems(self, hids, mems, mlen, qlen):
+ # does not deal with None
+ if mems is None:
+ return None
+
+ # mems is not None
+ assert len(hids) == len(mems), "len(hids) != len(mems)"
+
+ # There are `mlen + qlen` steps that can be cached into mems
+ with torch.no_grad():
+ new_mems = []
+ end_idx = mlen + max(0, qlen)
+ beg_idx = max(0, end_idx - self.mem_len)
+ for i in range(len(hids)):
+ cat = torch.cat([mems[i], hids[i]], dim=0)
+ new_mems.append(cat[beg_idx:end_idx].detach())
+
+ return new_mems
+
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TransfoXLModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ mems: Optional[List[torch.FloatTensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TransfoXLModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
+ # so we transpose here from shape [bsz, len] to shape [len, bsz]
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_ids = input_ids.transpose(0, 1).contiguous()
+ qlen, bsz = input_ids.size()
+ elif inputs_embeds is not None:
+ inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
+ qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if mems is None:
+ mems = self.init_mems(bsz)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
+ if head_mask is not None:
+ if head_mask.dim() == 1:
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
+ head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
+ elif head_mask.dim() == 2:
+ head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
+ head_mask = head_mask.to(
+ dtype=next(self.parameters()).dtype
+ ) # switch to float if need + fp16 compatibility
+ else:
+ head_mask = [None] * self.n_layer
+
+ if inputs_embeds is not None:
+ word_emb = inputs_embeds
+ else:
+ word_emb = self.word_emb(input_ids)
+
+ mlen = mems[0].size(0) if mems is not None else 0
+ klen = mlen + qlen
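+        # Build the boolean causal mask over the `klen = mlen + qlen` key positions. With
+        # `same_length=True`, the far past is masked as well so every query attends over a
+        # window of the same size; otherwise this is a plain causal mask shifted by `mlen`
+        # so that all cached memory positions stay visible.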
+ if self.same_length:
+ all_ones = word_emb.new_ones((qlen, klen), dtype=torch.bool)
+ mask_len = klen - self.mem_len
+ if mask_len > 0:
+ mask_shift_len = qlen - mask_len
+ else:
+ mask_shift_len = qlen
+ dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1
+ else:
+ dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.bool), diagonal=1 + mlen)[
+ :, :, None
+ ]
+
+ hids = []
+ attentions = [] if output_attentions else None
+ if self.attn_type == 0: # default
+            pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=torch.int64).to(
+                word_emb.dtype
+            )
+ if self.clamp_len > 0:
+ pos_seq.clamp_(max=self.clamp_len)
+ pos_emb = self.pos_emb(pos_seq)
+
+ core_out = self.drop(word_emb)
+ pos_emb = self.drop(pos_emb)
+
+ for i, layer in enumerate(self.layers):
+ hids.append(core_out)
+ mems_i = None if mems is None else mems[i]
+ layer_outputs = layer(
+ core_out,
+ pos_emb,
+ dec_attn_mask=dec_attn_mask,
+ mems=mems_i,
+ head_mask=head_mask[i],
+ output_attentions=output_attentions,
+ )
+ core_out = layer_outputs[0]
+ if output_attentions:
+ attentions.append(layer_outputs[1])
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ core_out = self.drop(core_out)
+
+ new_mems = self._update_mems(hids, mems, mlen, qlen)
+
+ if output_hidden_states:
+ # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
+ hids.append(core_out)
+ hids = tuple(t.transpose(0, 1).contiguous() for t in hids)
+ else:
+ hids = None
+ if output_attentions:
+ # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
+ attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
+ # We transpose back here to shape [bsz, len, hidden_dim]
+ core_out = core_out.transpose(0, 1).contiguous()
+
+ if not return_dict:
+ return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)
+
+ return TransfoXLModelOutput(
+ last_hidden_state=core_out,
+ mems=new_mems,
+ hidden_states=hids,
+ attentions=attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
+ input embeddings)
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
+ _tied_weights_keys = [r"crit\.out_projs\.\d+", r"crit\.out_layers\.\d+\.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = TransfoXLModel(config)
+ self.sample_softmax = config.sample_softmax
+ self.trainer_compatible = getattr(config, "trainer_compatible", False)
+
+ if not self.trainer_compatible:
+ warnings.warn(
+ "The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order "
+ "to use that updated output, please specify `trainer_compatible=True` as your configuration"
+ " attribute.",
+ DeprecationWarning,
+ )
+
+ assert self.sample_softmax <= 0, (
+ "Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
+ " https://github.com/huggingface/transformers/issues/3310"
+ )
+
+ self.crit = ProjectedAdaptiveLogSoftmax(
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def tie_weights(self):
+ """
+ Run this to be sure output and input (adaptive) softmax weights are tied
+ """
+
+ if self.config.tie_word_embeddings:
+ for i in range(len(self.crit.out_layers)):
+ self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])
+ if self.config.tie_projs:
+ for i, tie_proj in enumerate(self.config.tie_projs):
+ if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
+ if self.config.torchscript:
+ self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
+ else:
+ self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
+ elif tie_proj and self.config.div_val != 1:
+ if self.config.torchscript:
+ self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
+ else:
+ self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
+
+ def reset_memory_length(self, mem_len):
+ self.transformer.reset_memory_length(mem_len)
+
+ def init_mems(self, bsz):
+ return self.transformer.init_mems(bsz)
+
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TransfoXLLMHeadModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ mems: Optional[List[torch.FloatTensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TransfoXLLMHeadModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if input_ids is not None:
+ bsz, tgt_len = input_ids.size(0), input_ids.size(1)
+ elif inputs_embeds is not None:
+ bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden = transformer_outputs[0]
+ pred_hid = last_hidden[:, -tgt_len:]
+
+ if labels is not None:
+            # Prevent all labels from being -100, which would throw an error
+            # when backpropagating the loss
+ miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100
+ if miss_valid_label:
+                # Set a valid token, just to prevent the loss from being NaN
+ labels[0, 1] = self.config.eos_token_id
+
+ softmax_output = self.crit(pred_hid, labels)
+ prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else ()
+
+ if labels is not None:
+ losses = softmax_output.view(bsz, tgt_len - 1)
+            # Avoid incorporating padding (-100) tokens into the loss value
+ loss = losses[losses != 0].mean()
+ else:
+ losses, loss = None, None
+
+ if not return_dict:
+ if self.trainer_compatible:
+ output = (prediction_scores, losses) if losses is not None else (prediction_scores,)
+ output += transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+ else:
+ output = (prediction_scores, *transformer_outputs[1:])
+ output = ((losses,) + output) if losses is not None else output
+ return (output + (loss,)) if loss is not None else output
+
+ return TransfoXLLMHeadModelOutput(
+ loss=loss,
+ prediction_scores=prediction_scores,
+ losses=losses,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def get_output_embeddings(self):
+ """Double-check if you are using adaptive softmax."""
+ if self.sample_softmax > 0:
+ return self.out_layer
+ else:
+ return self.crit.out_layers[-1]
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
+ inputs = {}
+
+ # if past is defined in model kwargs then use it for faster decoding
+ if past_key_values:
+ inputs["mems"] = past_key_values
+ inputs["input_ids"] = input_ids[:, -1].unsqueeze(-1)
+ else:
+ inputs["input_ids"] = input_ids
+
+ return inputs
+
+ def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
+ new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer)
+
+ self.crit.cutoffs = new_cutoffs
+ self.crit.cutoff_ends = [0] + new_cutoffs
+ self.crit.n_token = new_num_tokens
+
+ @staticmethod
+ def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:
+ """
+ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
+ generation step.
+ """
+ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]
+
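+
+# Illustrative sketch (not part of the original module): demonstrates on dummy tensors how
+# `_reorder_cache` realigns each `mems` tensor of shape (mem_len, batch_size, d_model) with the
+# beam indices chosen at a generation step. All shapes and values here are arbitrary demo choices.
+def _reorder_mems_demo():
+    mem_len, batch_size, d_model = 4, 3, 8
+    mems = [torch.arange(mem_len * batch_size * d_model, dtype=torch.float).view(mem_len, batch_size, d_model)]
+    beam_idx = torch.tensor([2, 0, 1])  # beam 0 now continues from former beam 2, etc.
+    reordered = TransfoXLLMHeadModel._reorder_cache(mems, beam_idx)
+    # the batch dimension (dim 1) of every memory tensor now follows the selected beams
+    assert torch.equal(reordered[0], mems[0].index_select(1, beam_idx))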
+
+@add_start_docstrings(
+ """
+ The Transformer-XL Model transformer with a sequence classification head on top (linear layer).
+
+ [`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
+ models (e.g. GPT-1) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
+    each row of the batch).
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.transformer = TransfoXLModel(config)
+ self.score = nn.Linear(config.d_embed, self.num_labels, bias=False)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TransfoXLSequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ mems: Optional[List[torch.FloatTensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size, sequence_length = input_ids.shape[:2]
+ else:
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ assert (
+ self.config.pad_token_id is not None or batch_size == 1
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+
+ pooled_logits = logits[range(batch_size), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TransfoXLSequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
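+
+
+# Illustrative sketch (not part of the original module): mirrors, on dummy ids, the ONNX-friendly
+# indexing used in `forward` above to locate the last non-padding token of each row when a
+# `pad_token_id` is defined. The pad id and input values are arbitrary demo choices.
+def _last_token_index_demo():
+    pad_token_id = 0
+    input_ids = torch.tensor([[5, 6, 7, pad_token_id], [8, 9, pad_token_id, pad_token_id]])
+    sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
+    sequence_lengths = sequence_lengths % input_ids.shape[-1]
+    assert sequence_lengths.tolist() == [2, 1]  # index of the last real token in each row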
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7cd3bdd0681c130f2d81b70faa6321e5cce9df6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__init__.py
@@ -0,0 +1,82 @@
+# Copyright 2023 The HuggingFace and Baidu Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_ernie_m": ["ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieMConfig"],
+}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_ernie_m"] = ["ErnieMTokenizer"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_ernie_m"] = [
+ "ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "ErnieMForMultipleChoice",
+ "ErnieMForQuestionAnswering",
+ "ErnieMForSequenceClassification",
+ "ErnieMForTokenClassification",
+ "ErnieMModel",
+ "ErnieMPreTrainedModel",
+ "ErnieMForInformationExtraction",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_ernie_m import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieMConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_ernie_m import ErnieMTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_ernie_m import (
+ ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST,
+ ErnieMForInformationExtraction,
+ ErnieMForMultipleChoice,
+ ErnieMForQuestionAnswering,
+ ErnieMForSequenceClassification,
+ ErnieMForTokenClassification,
+ ErnieMModel,
+ ErnieMPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abf128de04a927ac30ef8168895b31985980bd3f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..942221241730b892ff43cfc72fd79ea121320c29
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..717829cc431f6ab92a916724199bb9213b8521fb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/modeling_ernie_m.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c61cc5215d2643dc1ed67fd9515d3ad4a15ec77
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/tokenization_ernie_m.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/configuration_ernie_m.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/configuration_ernie_m.py
new file mode 100644
index 0000000000000000000000000000000000000000..96451c9d9c999cd744efa41c90510822757561ed
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/configuration_ernie_m.py
@@ -0,0 +1,112 @@
+# coding=utf-8
+# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" ErnieM model configuration"""
+# Adapted from the original paddlenlp repository (https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/transformers/ernie_m/configuration.py)
+
+from __future__ import annotations
+
+from typing import Dict
+
+from ...configuration_utils import PretrainedConfig
+from ..deprecated._archive_maps import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class ErnieMConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`ErnieMModel`]. It is used to instantiate an
+    Ernie-M model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the `Ernie-M`
+ [susnato/ernie-m-base_pytorch](https://huggingface.co/susnato/ernie-m-base_pytorch) architecture.
+
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 250002):
+            Vocabulary size of `input_ids` in [`ErnieMModel`]. It is also the vocabulary size of the token embedding
+            matrix. Defines the number of different tokens that can be represented by the `input_ids` passed when
+            calling [`ErnieMModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the embedding layer, encoder layers and pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors to feed-forward layers are
+            first projected from hidden_size to intermediate_size, and then projected back to hidden_size. Typically
+            intermediate_size is larger than hidden_size.
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function in the feed-forward layer. `"gelu"`, `"relu"` and any other torch
+ supported activation functions are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings and encoder.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout probability used in `MultiHeadAttention` in all encoder layers to drop some attention targets.
+ max_position_embeddings (`int`, *optional*, defaults to 514):
+            The maximum length of the position encodings, which dictates the maximum supported length of an input
+            sequence.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the normal initializer for initializing all weight matrices.
+ pad_token_id (`int`, *optional*, defaults to 1):
+ Padding token id.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
+ The epsilon used by the layer normalization layers.
+ classifier_dropout (`float`, *optional*):
+ The dropout ratio for the classification head.
+ act_dropout (`float`, *optional*, defaults to 0.0):
+ This dropout probability is used in `ErnieMEncoderLayer` after activation.
+
+        A normal_initializer initializes weight matrices as normal distributions. See
+        `ErnieMPreTrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`.
+ """
+
+ model_type = "ernie_m"
+ attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
+
+ def __init__(
+ self,
+ vocab_size: int = 250002,
+ hidden_size: int = 768,
+ num_hidden_layers: int = 12,
+ num_attention_heads: int = 12,
+ intermediate_size: int = 3072,
+ hidden_act: str = "gelu",
+ hidden_dropout_prob: float = 0.1,
+ attention_probs_dropout_prob: float = 0.1,
+ max_position_embeddings: int = 514,
+ initializer_range: float = 0.02,
+ pad_token_id: int = 1,
+ layer_norm_eps: float = 1e-05,
+ classifier_dropout=None,
+ act_dropout=0.0,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.classifier_dropout = classifier_dropout
+ self.act_dropout = act_dropout
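+
+
+# Illustrative sketch (not part of the original module): builds a small ErnieMConfig with arbitrary
+# demo values and shows that the `attribute_map` declared above aliases `num_classes` to `num_labels`.
+def _ernie_m_config_demo():
+    config = ErnieMConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128)
+    assert config.num_classes == config.num_labels  # resolved through `attribute_map`
+    return config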
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/modeling_ernie_m.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/modeling_ernie_m.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac56e120a0c3d40769d109c85910cb62142c8268
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/modeling_ernie_m.py
@@ -0,0 +1,1058 @@
+# coding=utf-8
+# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch ErnieM model."""
+
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn, tensor
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_ernie_m import ErnieMConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "susnato/ernie-m-base_pytorch"
+_CONFIG_FOR_DOC = "ErnieMConfig"
+_TOKENIZER_FOR_DOC = "ErnieMTokenizer"
+
+
+from ..deprecated._archive_maps import ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Adapted from paddlenlp.transformers.ernie_m.modeling.ErnieEmbeddings
+class ErnieMEmbeddings(nn.Module):
+ """Construct the embeddings from word and position embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(
+ config.max_position_embeddings, config.hidden_size, padding_idx=config.pad_token_id
+ )
+ self.layer_norm = nn.LayerNorm(normalized_shape=config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
+ self.padding_idx = config.pad_token_id
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.LongTensor] = None,
+ past_key_values_length: int = 0,
+ ) -> torch.Tensor:
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ if position_ids is None:
+ input_shape = inputs_embeds.size()[:-1]
+ ones = torch.ones(input_shape, dtype=torch.int64, device=inputs_embeds.device)
+ seq_length = torch.cumsum(ones, dim=1)
+ position_ids = seq_length - ones
+
+ if past_key_values_length > 0:
+ position_ids = position_ids + past_key_values_length
+ # to mimic paddlenlp implementation
+ position_ids += 2
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings = inputs_embeds + position_embeddings
+ embeddings = self.layer_norm(embeddings)
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
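+
+# Illustrative sketch (not part of the original module): reproduces, on dummy shapes, how
+# ErnieMEmbeddings derives position ids when none are provided -- cumulative positions offset by 2
+# to mimic the paddlenlp implementation. Batch and sequence sizes are arbitrary demo choices.
+def _ernie_m_position_ids_demo():
+    batch_size, seq_length, past_key_values_length = 2, 5, 0
+    ones = torch.ones((batch_size, seq_length), dtype=torch.int64)
+    position_ids = torch.cumsum(ones, dim=1) - ones + past_key_values_length + 2
+    assert position_ids[0].tolist() == [2, 3, 4, 5, 6]
+    return position_ids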
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ErnieM,self.value->self.v_proj,self.key->self.k_proj,self.query->self.q_proj
+class ErnieMSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.q_proj = nn.Linear(config.hidden_size, self.all_head_size)
+ self.k_proj = nn.Linear(config.hidden_size, self.all_head_size)
+ self.v_proj = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.q_proj(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.k_proj(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.v_proj(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
+ value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
+ value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the ErnieMModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
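+
+# Illustrative sketch (not part of the original module): shows the head-splitting reshape performed
+# by `transpose_for_scores`, turning (batch, seq, hidden) into (batch, num_heads, seq, head_size).
+# The tiny config values are arbitrary demo choices.
+def _transpose_for_scores_demo():
+    config = ErnieMConfig(hidden_size=8, num_attention_heads=2, num_hidden_layers=1, intermediate_size=16)
+    self_attn = ErnieMSelfAttention(config)
+    hidden_states = torch.zeros(3, 5, 8)  # (batch, seq, hidden)
+    assert self_attn.transpose_for_scores(hidden_states).shape == (3, 2, 5, 4)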
+
+class ErnieMAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self_attn = ErnieMSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self_attn.num_attention_heads, self.self_attn.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self_attn.q_proj = prune_linear_layer(self.self_attn.q_proj, index)
+ self.self_attn.k_proj = prune_linear_layer(self.self_attn.k_proj, index)
+ self.self_attn.v_proj = prune_linear_layer(self.self_attn.v_proj, index)
+ self.out_proj = prune_linear_layer(self.out_proj, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self_attn.num_attention_heads = self.self_attn.num_attention_heads - len(heads)
+ self.self_attn.all_head_size = self.self_attn.attention_head_size * self.self_attn.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self_attn(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.out_proj(self_outputs[0])
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
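+
+# Illustrative sketch (not part of the original module): pruning one attention head shrinks the
+# q/k/v projections and the input side of `out_proj` accordingly. Config values are arbitrary
+# small demo choices.
+def _prune_heads_demo():
+    config = ErnieMConfig(hidden_size=8, num_attention_heads=2, num_hidden_layers=1, intermediate_size=16)
+    attention = ErnieMAttention(config)
+    attention.prune_heads([0])
+    assert attention.self_attn.num_attention_heads == 1
+    assert attention.self_attn.q_proj.out_features == 4  # one remaining head of size 4
+    assert attention.out_proj.in_features == 4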
+
+class ErnieMEncoderLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ # to mimic paddlenlp implementation
+ dropout = 0.1 if config.hidden_dropout_prob is None else config.hidden_dropout_prob
+ act_dropout = config.hidden_dropout_prob if config.act_dropout is None else config.act_dropout
+
+ self.self_attn = ErnieMAttention(config)
+ self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.dropout = nn.Dropout(act_dropout)
+ self.linear2 = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+ if isinstance(config.hidden_act, str):
+ self.activation = ACT2FN[config.hidden_act]
+ else:
+ self.activation = config.hidden_act
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = True,
+ ):
+ residual = hidden_states
+ if output_attentions:
+ hidden_states, attention_opt_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ )
+
+ else:
+ hidden_states = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + self.dropout1(hidden_states)
+ hidden_states = self.norm1(hidden_states)
+ residual = hidden_states
+
+ hidden_states = self.linear1(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.linear2(hidden_states)
+ hidden_states = residual + self.dropout2(hidden_states)
+ hidden_states = self.norm2(hidden_states)
+
+ if output_attentions:
+ return hidden_states, attention_opt_weights
+ else:
+ return hidden_states
+
+
+class ErnieMEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([ErnieMEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+
+ def forward(
+ self,
+ input_embeds: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ hidden_states = () if output_hidden_states else None
+ attentions = () if output_attentions else None
+
+ output = input_embeds
+ if output_hidden_states:
+ hidden_states = hidden_states + (output,)
+ for i, layer in enumerate(self.layers):
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ output, opt_attn_weights = layer(
+ hidden_states=output,
+ attention_mask=attention_mask,
+ head_mask=layer_head_mask,
+ past_key_value=past_key_value,
+ )
+
+ if output_hidden_states:
+ hidden_states = hidden_states + (output,)
+ if output_attentions:
+ attentions = attentions + (opt_attn_weights,)
+
+ last_hidden_state = output
+ if not return_dict:
+ return tuple(v for v in [last_hidden_state, hidden_states, attentions] if v is not None)
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=attentions
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ErnieM
+class ErnieMPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class ErnieMPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ErnieMConfig
+ base_model_prefix = "ernie_m"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+ERNIE_M_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`ErnieMConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+ERNIE_M_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`ErnieMTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ErnieM Model transformer outputting raw hidden-states without any specific head on top.",
+ ERNIE_M_START_DOCSTRING,
+)
+class ErnieMModel(ErnieMPreTrainedModel):
+ def __init__(self, config, add_pooling_layer=True):
+ super(ErnieMModel, self).__init__(config)
+ self.initializer_range = config.initializer_range
+ self.embeddings = ErnieMEmbeddings(config)
+ self.encoder = ErnieMEncoder(config)
+ self.pooler = ErnieMPooler(config) if add_pooling_layer else None
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+        base class `PreTrainedModel`.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layers[layer].self_attn.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ processor_class=_TOKENIZER_FOR_DOC,
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[tensor] = None,
+ position_ids: Optional[tensor] = None,
+ attention_mask: Optional[tensor] = None,
+ head_mask: Optional[tensor] = None,
+ inputs_embeds: Optional[tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[tensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time.")
+
+ # init the default bool value
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ past_key_values_length = 0
+ if past_key_values is not None:
+ past_key_values_length = past_key_values[0][0].shape[2]
+
+ # Adapted from paddlenlp.transformers.ernie_m.ErnieMModel
+ if attention_mask is None:
+ attention_mask = (input_ids == self.config.pad_token_id).to(torch.float32)
+ attention_mask *= torch.finfo(attention_mask.dtype).min
+ if past_key_values is not None:
+ batch_size = past_key_values[0][0].shape[0]
+ past_mask = torch.zeros([batch_size, 1, 1, past_key_values_length], dtype=attention_mask.dtype)
+ attention_mask = torch.concat([past_mask, attention_mask], dim=-1)
+ # For 2D attention_mask from tokenizer
+ elif attention_mask.ndim == 2:
+ attention_mask = attention_mask.to(torch.float32)
+ attention_mask = 1.0 - attention_mask
+ attention_mask *= torch.finfo(attention_mask.dtype).min
+
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ past_key_values=past_key_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ sequence_output = encoder_outputs[0]
+ pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
+ return (sequence_output, pooler_output) + encoder_outputs[1:]
+
+ sequence_output = encoder_outputs["last_hidden_state"]
+ pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
+ hidden_states = None if not output_hidden_states else encoder_outputs["hidden_states"]
+ attentions = None if not output_attentions else encoder_outputs["attentions"]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooler_output,
+ hidden_states=hidden_states,
+ attentions=attentions,
+ )
+
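+
+# Illustrative sketch (not part of the original module): mirrors how `forward` above builds an
+# additive attention mask when none is passed -- padding positions get a large negative value so
+# softmax assigns them ~zero probability. `pad_token_id=1` matches the ErnieM default; the ids are
+# arbitrary demo values.
+def _default_attention_mask_demo():
+    pad_token_id = 1
+    input_ids = torch.tensor([[5, 6, pad_token_id], [7, pad_token_id, pad_token_id]])
+    attention_mask = (input_ids == pad_token_id).to(torch.float32)
+    attention_mask *= torch.finfo(attention_mask.dtype).min
+    extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
+    assert extended_attention_mask.shape == (2, 1, 1, 3)  # broadcastable to (batch, heads, q_len, k_len)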
+
+@add_start_docstrings(
+ """ErnieM Model transformer with a sequence classification/regression head on top (a linear layer on top of
+ the pooled output) e.g. for GLUE tasks.""",
+ ERNIE_M_START_DOCSTRING,
+)
+class ErnieMForSequenceClassification(ErnieMPreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->ErnieM,bert->ernie_m
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.ernie_m = ErnieMModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ processor_class=_TOKENIZER_FOR_DOC,
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.Tensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = True,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie_m(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ past_key_values=past_key_values,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
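+
+# Illustrative sketch (not part of the original module): runs a tiny, randomly initialized ErnieM
+# classifier on dummy ids; with integer labels and `num_labels > 1` the forward pass above selects
+# single-label classification and returns a cross-entropy loss. All sizes are arbitrary demo choices.
+def _ernie_m_sequence_classification_demo():
+    config = ErnieMConfig(
+        vocab_size=32, hidden_size=8, num_hidden_layers=1, num_attention_heads=2,
+        intermediate_size=16, max_position_embeddings=16, num_labels=3,
+    )
+    model = ErnieMForSequenceClassification(config)
+    outputs = model(input_ids=torch.tensor([[2, 3, 4, 1]]), labels=torch.tensor([1]))
+    assert outputs.logits.shape == (1, 3) and outputs.loss is not None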
+
+@add_start_docstrings(
+ """ErnieM Model with a multiple choice classification head on top (a linear layer on top of
+ the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""",
+ ERNIE_M_START_DOCSTRING,
+)
+class ErnieMForMultipleChoice(ErnieMPreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->ErnieM,bert->ernie_m
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.ernie_m = ErnieMModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.FloatTensor], MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.ernie_m(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
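+
+# Illustrative sketch (not part of the original module): multiple-choice inputs of shape
+# (batch_size, num_choices, seq_len) are flattened to (batch_size * num_choices, seq_len) before the
+# encoder, and the per-choice scores are reshaped back to (batch_size, num_choices) for the
+# cross-entropy over choices. All sizes are arbitrary demo choices.
+def _multiple_choice_reshape_demo():
+    batch_size, num_choices, seq_len = 2, 4, 6
+    input_ids = torch.ones(batch_size, num_choices, seq_len, dtype=torch.long)
+    flat_input_ids = input_ids.view(-1, input_ids.size(-1))
+    assert flat_input_ids.shape == (batch_size * num_choices, seq_len)
+    logits = torch.zeros(batch_size * num_choices, 1)  # one score per (example, choice) pair
+    assert logits.view(-1, num_choices).shape == (batch_size, num_choices)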
+
+@add_start_docstrings(
+ """ErnieM Model with a token classification head on top (a linear layer on top of
+ the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""",
+ ERNIE_M_START_DOCSTRING,
+)
+class ErnieMForTokenClassification(ErnieMPreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->ErnieM,bert->ernie_m
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ processor_class=_TOKENIZER_FOR_DOC,
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.Tensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = True,
+ labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple[torch.FloatTensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie_m(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ past_key_values=past_key_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """ErnieM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""",
+ ERNIE_M_START_DOCSTRING,
+)
+class ErnieMForQuestionAnswering(ErnieMPreTrainedModel):
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->ErnieM,bert->ernie_m
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ processor_class=_TOKENIZER_FOR_DOC,
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.ernie_m(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, splitting can add an extra dimension, so squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """ErnieMForInformationExtraction is a Ernie-M Model with two linear layer on top of the hidden-states output to
+ compute `start_prob` and `end_prob`, designed for Universal Information Extraction.""",
+ ERNIE_M_START_DOCSTRING,
+)
+# Copied from paddlenlp.transformers.ernie_m.modeling.UIEM
+class ErnieMForInformationExtraction(ErnieMPreTrainedModel):
+ def __init__(self, config):
+ super(ErnieMForInformationExtraction, self).__init__(config)
+ self.ernie_m = ErnieMModel(config)
+ self.linear_start = nn.Linear(config.hidden_size, 1)
+ self.linear_end = nn.Linear(config.hidden_size, 1)
+ self.sigmoid = nn.Sigmoid()
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
+ r"""
+        start_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for position (index) for computing the start_positions loss. Positions outside of the sequence are
+            not taken into account for computing the loss.
+        end_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for position (index) for computing the end_positions loss. Positions outside of the sequence are
+            not taken into account for computing the loss.
+ """
+
+ result = self.ernie_m(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+        if return_dict:
+            sequence_output = result.last_hidden_state
+        else:
+            sequence_output = result[0]
+
+ start_logits = self.linear_start(sequence_output)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = self.linear_end(sequence_output)
+ end_logits = end_logits.squeeze(-1)
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, splitting adds a dimension; squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
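+            # BCEWithLogitsLoss expects float targets of the same shape as the logits
+            # (batch_size, sequence_length), i.e. one soft label per token position.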
+ loss_fct = BCEWithLogitsLoss()
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ return tuple(
+ i
+ for i in [total_loss, start_logits, end_logits, result.hidden_states, result.attentions]
+ if i is not None
+ )
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=result.hidden_states,
+ attentions=result.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/tokenization_ernie_m.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/tokenization_ernie_m.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bd7edea1cab3a573fcd6dc07a12e98860588251
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/ernie_m/tokenization_ernie_m.py
@@ -0,0 +1,405 @@
+# coding=utf-8
+# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for Ernie-M."""
+
+import io
+import os
+import unicodedata
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SPIECE_UNDERLINE = "▁"
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
+
+RESOURCE_FILES_NAMES = {
+ "sentencepiece_model_file": "sentencepiece.bpe.model",
+ "vocab_file": "vocab.txt",
+}
+
+
+# Adapted from paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
+class ErnieMTokenizer(PreTrainedTokenizer):
+ r"""
+    Constructs an Ernie-M tokenizer. It uses the `sentencepiece` tool to split words into sub-words.
+
+ Args:
+ sentencepiece_model_file (`str`):
+ The file path of sentencepiece model.
+ vocab_file (`str`, *optional*):
+ The file path of the vocabulary.
+        do_lower_case (`bool`, *optional*, defaults to `False`):
+            Whether or not to lowercase the input when tokenizing.
+        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+            A special token representing the `unknown (out-of-vocabulary)` token. A token that is not in the
+            vocabulary is set to `unk_token` in order to be converted to an ID.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ A special token separating two different sentences in the same input.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ A special token used to make arrays of tokens the same size for batching purposes.
+        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+            A special token used for sequence classification. It is the first token of the sequence when built with
+            special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+            A special token representing a masked token. This is the token used in the masked language modeling task;
+            the model tries to predict the original unmasked token in its place.
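+
+    Example (a minimal sketch, assuming a local SentencePiece model at `./sentencepiece.bpe.model`):
+
+    ```python
+    >>> from transformers import ErnieMTokenizer
+
+    >>> tokenizer = ErnieMTokenizer(sentencepiece_model_ckpt="./sentencepiece.bpe.model")
+    >>> input_ids = tokenizer("Hello world")["input_ids"]  # [CLS] + sub-word ids + [SEP]
+    ```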
+ """
+
+ # Ernie-M model doesn't have token_type embedding.
+ model_input_names: List[str] = ["input_ids"]
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ resource_files_names = RESOURCE_FILES_NAMES
+
+ def __init__(
+ self,
+ sentencepiece_model_ckpt,
+ vocab_file=None,
+ do_lower_case=False,
+ encoding="utf8",
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+        # The mask token behaves like a normal word, i.e. it includes the space before it and is included in the
+        # raw text, so there should be a match in a non-normalized sentence.
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.do_lower_case = do_lower_case
+ self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(sentencepiece_model_ckpt)
+
+ # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
+ if vocab_file is not None:
+ self.vocab = self.load_vocab(filepath=vocab_file)
+ else:
+ self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
+ self.reverse_vocab = {v: k for k, v in self.vocab.items()}
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ vocab_file=vocab_file,
+ encoding=encoding,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ def get_offset_mapping(self, text):
+ if text is None:
+ return None
+
+ split_tokens = self.tokenize(text)
+ normalized_text, char_mapping = "", []
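+        # char_mapping[j] holds the index in the original text of the j-th character of the normalized
+        # text, so token offsets can be mapped back to the raw string after NFKC normalization.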
+
+ for i, ch in enumerate(text):
+ if ch in self.SP_CHAR_MAPPING:
+ ch = self.SP_CHAR_MAPPING.get(ch)
+ else:
+ ch = unicodedata.normalize("NFKC", ch)
+ if self.is_whitespace(ch):
+ continue
+ normalized_text += ch
+ char_mapping.extend([i] * len(ch))
+
+ text, token_mapping, offset = normalized_text, [], 0
+
+ if self.do_lower_case:
+ text = text.lower()
+
+ for token in split_tokens:
+ if token[:1] == "▁":
+ token = token[1:]
+ start = text[offset:].index(token) + offset
+ end = start + len(token)
+
+ token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
+ offset = end
+ return token_mapping
+
+ @property
+ def vocab_size(self):
+ return len(self.vocab)
+
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.sentencepiece_model_ckpt)
+
+ def clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
+
+ def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
+ """Tokenize a string."""
+
+ if self.sp_model_kwargs.get("enable_sampling") is True:
+ enable_sampling = True
+ if self.sp_model_kwargs.get("alpha") is not None:
+ alpha = self.sp_model_kwargs.get("alpha")
+ if self.sp_model_kwargs.get("nbest_size") is not None:
+ nbest_size = self.sp_model_kwargs.get("nbest_size")
+
+ if not enable_sampling:
+ pieces = self.sp_model.EncodeAsPieces(text)
+ else:
+ pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
+ new_pieces = []
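+        # Post-process the SentencePiece pieces: split out Chinese characters and punctuation, and break
+        # pieces at digit/non-digit boundaries, mirroring the PaddleNLP Ernie-M tokenizer.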
+ for pi, piece in enumerate(pieces):
+ if piece == SPIECE_UNDERLINE:
+ if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
+ new_pieces.append(SPIECE_UNDERLINE)
+ continue
+ else:
+ continue
+ lst_i = 0
+ for i, chunk in enumerate(piece):
+ if chunk == SPIECE_UNDERLINE:
+ continue
+ if self.is_ch_char(chunk) or self.is_punct(chunk):
+ if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
+ new_pieces.append(piece[lst_i:i])
+ new_pieces.append(chunk)
+ lst_i = i + 1
+ elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
+ if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
+ new_pieces.append(piece[lst_i:i])
+ lst_i = i
+ elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
+ if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
+ new_pieces.append(piece[lst_i:i])
+ lst_i = i
+ if len(piece) > lst_i:
+ new_pieces.append(piece[lst_i:])
+ return new_pieces
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ def convert_ids_to_string(self, ids):
+ """
+        Converts a sequence of ids (integers) into a single string by first converting them to tokens.
+ """
+ tokens = self.convert_ids_to_tokens(ids)
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
+ def _convert_token_to_id(self, token):
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.reverse_vocab.get(index, self.unk_token)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ r"""
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+        and adding special tokens. An ErnieM sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ Returns:
+            `List[int]`: List of input IDs with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ _cls = [self.cls_token_id]
+ _sep = [self.sep_token_id]
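+        # Pair layout: [CLS] A [SEP] [SEP] B [SEP] (two consecutive [SEP] tokens separate the sequences).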
+ return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
+
+ def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
+ r"""
+        Build an offset map from a pair of offset maps by concatenating and adding the offsets of special tokens. An
+        Ernie-M offset_mapping has the following format:
+
+ - single sequence: `(0,0) X (0,0)`
+ - pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)`
+
+ Args:
+            offset_mapping_0 (`List[tuple]`):
+                List of char offsets to which the special tokens will be added.
+            offset_mapping_1 (`List[tuple]`, *optional*):
+                Optional second list of wordpiece offsets for offset mapping pairs.
+ Returns:
+ `List[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens.
+ """
+ if offset_mapping_1 is None:
+ return [(0, 0)] + offset_mapping_0 + [(0, 0)]
+
+ return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
+
+ def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
+ r"""
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `encode` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids of the first sequence.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+ Returns:
+ `List[int]`:
+ The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ if token_ids_1 is not None:
+ raise ValueError(
+ "You should not supply a second sequence if the provided sequence of "
+ "ids is already formatted with special tokens for the model."
+ )
+ return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create the token type IDs corresponding to the sequences passed. [What are token type
+ IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of
+        building them.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ The first tokenized sequence.
+ token_ids_1 (`List[int]`, *optional*):
+ The second tokenized sequence.
+ Returns:
+ `List[int]`: The token type ids.
+ """
+ # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
+ if token_ids_1 is None:
+ # [CLS] X [SEP]
+ return (len(token_ids_0) + 2) * [0]
+
+ # [CLS] A [SEP] [SEP] B [SEP]
+ return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
+
+ def is_ch_char(self, char):
+ """
+        Checks whether `char` is a Chinese (CJK) character.
+ """
+ if "\u4e00" <= char <= "\u9fff":
+ return True
+ return False
+
+ def is_alpha(self, char):
+ """
+        Checks whether `char` is an ASCII letter.
+ """
+ if ("a" <= char <= "z") or ("A" <= char <= "Z"):
+ return True
+ return False
+
+ def is_punct(self, char):
+ """
+        Checks whether `char` is a punctuation character.
+ """
+ if char in ",;:.?!~,;:。?!《》【】":
+ return True
+ return False
+
+ def is_whitespace(self, char):
+ """
+        Checks whether `char` is a whitespace character.
+ """
+ if char == " " or char == "\t" or char == "\n" or char == "\r":
+ return True
+ if len(char) == 1:
+ cat = unicodedata.category(char)
+ if cat == "Zs":
+ return True
+ return False
+
+ def load_vocab(self, filepath):
+ token_to_idx = {}
+ with io.open(filepath, "r", encoding="utf-8") as f:
+ for index, line in enumerate(f):
+ token = line.rstrip("\n")
+ token_to_idx[token] = int(index)
+
+ return token_to_idx
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+
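+        # Also serialize the SentencePiece model next to the vocab so the tokenizer can be fully reloaded.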
+ tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
+ with open(tokenizer_model_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (vocab_file,)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..485160d1ccaa69b035e20c5710e9e5b319423816
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__init__.py
@@ -0,0 +1,107 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_fnet"] = [
+ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "FNetForMaskedLM",
+ "FNetForMultipleChoice",
+ "FNetForNextSentencePrediction",
+ "FNetForPreTraining",
+ "FNetForQuestionAnswering",
+ "FNetForSequenceClassification",
+ "FNetForTokenClassification",
+ "FNetLayer",
+ "FNetModel",
+ "FNetPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_fnet import FNetTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_fnet_fast import FNetTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_fnet import (
+ FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ FNetForMaskedLM,
+ FNetForMultipleChoice,
+ FNetForNextSentencePrediction,
+ FNetForPreTraining,
+ FNetForQuestionAnswering,
+ FNetForSequenceClassification,
+ FNetForTokenClassification,
+ FNetLayer,
+ FNetModel,
+ FNetPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/convert_fnet_original_flax_checkpoint_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/convert_fnet_original_flax_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f65f8d29fcf4c7f7ed364f395f42d003c508c76e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/convert_fnet_original_flax_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/modeling_fnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/modeling_fnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf6d4e6ecef81da839b33ec422f968ff548ea973
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/modeling_fnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c6b5a2d77ee7ec9cb3238f02ec6255a01266d502
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet_fast.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c2ae49c11b906bf24278de2f0455192f0f5e2c7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/__pycache__/tokenization_fnet_fast.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/configuration_fnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/configuration_fnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..4678cae92e2a29a40317e61d3bd9371d941f7d30
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/configuration_fnet.py
@@ -0,0 +1,119 @@
+# coding=utf-8
+# Copyright 2021 Google AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" FNet model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class FNetConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`FNetModel`]. It is used to instantiate an FNet
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the FNet
+ [google/fnet-base](https://huggingface.co/google/fnet-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 32000):
+ Vocabulary size of the FNet model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`FNetModel`] or [`TFFNetModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimension of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 4):
+ The vocabulary size of the `token_type_ids` passed when calling [`FNetModel`] or [`TFFNetModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ use_tpu_fourier_optimizations (`bool`, *optional*, defaults to `False`):
+            Determines whether to use TPU optimized FFTs. If `True`, the model will favor axis-wise FFT transforms.
+ Set to `False` for GPU/CPU hardware, in which case n-dimensional FFTs are used.
+ tpu_short_seq_length (`int`, *optional*, defaults to 512):
+ The sequence length that is expected by the model when using TPUs. This will be used to initialize the DFT
+ matrix only when *use_tpu_fourier_optimizations* is set to `True` and the input sequence is shorter than or
+ equal to 4096 tokens.
+
+ Example:
+
+ ```python
+ >>> from transformers import FNetConfig, FNetModel
+
+ >>> # Initializing a FNet fnet-base style configuration
+ >>> configuration = FNetConfig()
+
+ >>> # Initializing a model (with random weights) from the fnet-base style configuration
+ >>> model = FNetModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "fnet"
+
+ def __init__(
+ self,
+ vocab_size=32000,
+ hidden_size=768,
+ num_hidden_layers=12,
+ intermediate_size=3072,
+ hidden_act="gelu_new",
+ hidden_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=4,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ use_tpu_fourier_optimizations=False,
+ tpu_short_seq_length=512,
+ pad_token_id=3,
+ bos_token_id=1,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.initializer_range = initializer_range
+ self.type_vocab_size = type_vocab_size
+ self.layer_norm_eps = layer_norm_eps
+ self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
+ self.tpu_short_seq_length = tpu_short_seq_length
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..f77a44874ae42919ccbdb32d35e8272074d80acc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py
@@ -0,0 +1,157 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert FNet checkpoint."""
+
+
+import argparse
+
+import torch
+from flax.training.checkpoints import restore_checkpoint
+
+from transformers import FNetConfig, FNetForPreTraining
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, fnet_config_file, save_path):
+ # Initialise PyTorch model
+ config = FNetConfig.from_json_file(fnet_config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ fnet_pretraining_model = FNetForPreTraining(config)
+
+ checkpoint_dict = restore_checkpoint(flax_checkpoint_path, None)
+ pretrained_model_params = checkpoint_dict["target"]
+
+ # Embeddings
+ # Position IDs
+ state_dict = fnet_pretraining_model.state_dict()
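+    # The freshly initialized state dict is only used to pick up buffers such as `position_ids`; every
+    # learnable tensor below is overwritten with the Flax weights (dense kernels are transposed with `.T`).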
+
+ position_ids = state_dict["fnet.embeddings.position_ids"]
+ new_state_dict = {"fnet.embeddings.position_ids": position_ids}
+ # Embedding Layers
+ new_state_dict["fnet.embeddings.word_embeddings.weight"] = torch.tensor(
+ pretrained_model_params["encoder"]["embedder"]["word"]["embedding"]
+ )
+ new_state_dict["fnet.embeddings.position_embeddings.weight"] = torch.tensor(
+ pretrained_model_params["encoder"]["embedder"]["position"]["embedding"][0]
+ )
+ new_state_dict["fnet.embeddings.token_type_embeddings.weight"] = torch.tensor(
+ pretrained_model_params["encoder"]["embedder"]["type"]["embedding"]
+ )
+ new_state_dict["fnet.embeddings.projection.weight"] = torch.tensor(
+ pretrained_model_params["encoder"]["embedder"]["hidden_mapping_in"]["kernel"]
+ ).T
+ new_state_dict["fnet.embeddings.projection.bias"] = torch.tensor(
+ pretrained_model_params["encoder"]["embedder"]["hidden_mapping_in"]["bias"]
+ )
+ new_state_dict["fnet.embeddings.LayerNorm.weight"] = torch.tensor(
+ pretrained_model_params["encoder"]["embedder"]["layer_norm"]["scale"]
+ )
+ new_state_dict["fnet.embeddings.LayerNorm.bias"] = torch.tensor(
+ pretrained_model_params["encoder"]["embedder"]["layer_norm"]["bias"]
+ )
+
+ # Encoder Layers
+ for layer in range(config.num_hidden_layers):
+ new_state_dict[f"fnet.encoder.layer.{layer}.fourier.output.LayerNorm.weight"] = torch.tensor(
+ pretrained_model_params["encoder"][f"encoder_{layer}"]["mixing_layer_norm"]["scale"]
+ )
+ new_state_dict[f"fnet.encoder.layer.{layer}.fourier.output.LayerNorm.bias"] = torch.tensor(
+ pretrained_model_params["encoder"][f"encoder_{layer}"]["mixing_layer_norm"]["bias"]
+ )
+
+ new_state_dict[f"fnet.encoder.layer.{layer}.intermediate.dense.weight"] = torch.tensor(
+ pretrained_model_params["encoder"][f"feed_forward_{layer}"]["intermediate"]["kernel"]
+ ).T
+ new_state_dict[f"fnet.encoder.layer.{layer}.intermediate.dense.bias"] = torch.tensor(
+ pretrained_model_params["encoder"][f"feed_forward_{layer}"]["intermediate"]["bias"]
+ )
+
+ new_state_dict[f"fnet.encoder.layer.{layer}.output.dense.weight"] = torch.tensor(
+ pretrained_model_params["encoder"][f"feed_forward_{layer}"]["output"]["kernel"]
+ ).T
+ new_state_dict[f"fnet.encoder.layer.{layer}.output.dense.bias"] = torch.tensor(
+ pretrained_model_params["encoder"][f"feed_forward_{layer}"]["output"]["bias"]
+ )
+
+ new_state_dict[f"fnet.encoder.layer.{layer}.output.LayerNorm.weight"] = torch.tensor(
+ pretrained_model_params["encoder"][f"encoder_{layer}"]["output_layer_norm"]["scale"]
+ )
+ new_state_dict[f"fnet.encoder.layer.{layer}.output.LayerNorm.bias"] = torch.tensor(
+ pretrained_model_params["encoder"][f"encoder_{layer}"]["output_layer_norm"]["bias"]
+ )
+
+ # Pooler Layers
+ new_state_dict["fnet.pooler.dense.weight"] = torch.tensor(pretrained_model_params["encoder"]["pooler"]["kernel"]).T
+ new_state_dict["fnet.pooler.dense.bias"] = torch.tensor(pretrained_model_params["encoder"]["pooler"]["bias"])
+
+ # Masked LM Layers
+ new_state_dict["cls.predictions.transform.dense.weight"] = torch.tensor(
+ pretrained_model_params["predictions_dense"]["kernel"]
+ ).T
+ new_state_dict["cls.predictions.transform.dense.bias"] = torch.tensor(
+ pretrained_model_params["predictions_dense"]["bias"]
+ )
+ new_state_dict["cls.predictions.transform.LayerNorm.weight"] = torch.tensor(
+ pretrained_model_params["predictions_layer_norm"]["scale"]
+ )
+ new_state_dict["cls.predictions.transform.LayerNorm.bias"] = torch.tensor(
+ pretrained_model_params["predictions_layer_norm"]["bias"]
+ )
+ new_state_dict["cls.predictions.decoder.weight"] = torch.tensor(
+ pretrained_model_params["encoder"]["embedder"]["word"]["embedding"]
+ )
+ new_state_dict["cls.predictions.decoder.bias"] = torch.tensor(
+ pretrained_model_params["predictions_output"]["output_bias"]
+ )
+ new_state_dict["cls.predictions.bias"] = torch.tensor(pretrained_model_params["predictions_output"]["output_bias"])
+
+ # Seq Relationship Layers
+ new_state_dict["cls.seq_relationship.weight"] = torch.tensor(
+ pretrained_model_params["classification"]["output_kernel"]
+ )
+ new_state_dict["cls.seq_relationship.bias"] = torch.tensor(
+ pretrained_model_params["classification"]["output_bias"]
+ )
+
+ # Load State Dict
+ fnet_pretraining_model.load_state_dict(new_state_dict)
+
+ # Save PreTrained
+ print(f"Saving pretrained model to {save_path}")
+ fnet_pretraining_model.save_pretrained(save_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--flax_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
+ )
+ parser.add_argument(
+ "--fnet_config_file",
+ default=None,
+ type=str,
+ required=True,
+ help=(
+ "The config json file corresponding to the pre-trained FNet model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument("--save_path", default=None, type=str, required=True, help="Path to the output model.")
+ args = parser.parse_args()
+ convert_flax_checkpoint_to_pytorch(args.flax_checkpoint_path, args.fnet_config_file, args.save_path)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/tokenization_fnet.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/tokenization_fnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..a38114eb6d01ae6bee3c48e82513b7085b865ed2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/fnet/tokenization_fnet.py
@@ -0,0 +1,338 @@
+# coding=utf-8
+# Copyright 2021 Google Research, Google AI, Google Brain and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization classes for FNet model."""
+
+import os
+import unicodedata
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
+
+
+SPIECE_UNDERLINE = "▁"
+
+
+class FNetTokenizer(PreTrainedTokenizer):
+ """
+ Construct an FNet tokenizer. Adapted from [`AlbertTokenizer`]. Based on
+ [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`]
+ which contains most of the main methods. Users should refer to this superclass for more information regarding those
+ methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+ remove_space (`bool`, *optional*, defaults to `True`):
+ Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
+ keep_accents (`bool`, *optional*, defaults to `True`):
+ Whether or not to keep accents when tokenizing.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
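+
+    Example (a minimal sketch, assuming the `google/fnet-base` checkpoint is reachable):
+
+    ```python
+    >>> from transformers import FNetTokenizer
+
+    >>> tokenizer = FNetTokenizer.from_pretrained("google/fnet-base")
+    >>> input_ids = tokenizer("Hello world")["input_ids"]  # [CLS] + sub-word ids + [SEP]
+    ```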
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "token_type_ids"]
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=False,
+ remove_space=True,
+ keep_accents=True,
+ unk_token="",
+ sep_token="[SEP]",
+ pad_token="",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+        # The mask token behaves like a normal word, i.e. it includes the space before it and is included in the
+        # raw text, so there should be a match in a non-normalized sentence.
+        mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
+        cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
+        sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+ self.vocab_file = vocab_file
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ remove_space=remove_space,
+ keep_accents=keep_accents,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return len(self.sp_model)
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ def preprocess_text(self, inputs):
+ if self.remove_space:
+ outputs = " ".join(inputs.strip().split())
+ else:
+ outputs = inputs
+ outputs = outputs.replace("``", '"').replace("''", '"')
+
+ if not self.keep_accents:
+ outputs = unicodedata.normalize("NFKD", outputs)
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
+ if self.do_lower_case:
+ outputs = outputs.lower()
+
+ return outputs
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Tokenize a string."""
+ text = self.preprocess_text(text)
+ pieces = self.sp_model.encode(text, out_type=str)
+ new_pieces = []
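+        # Re-split pieces like "9," so a trailing comma after a digit becomes its own token, matching the
+        # behavior of the ALBERT tokenizer this class is adapted from.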
+ for piece in pieces:
+            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
+ if len(cur_pieces[0]) == 1:
+ cur_pieces = cur_pieces[1:]
+ else:
+ cur_pieces[0] = cur_pieces[0][1:]
+ cur_pieces.append(piece[-1])
+ new_pieces.extend(cur_pieces)
+ else:
+ new_pieces.append(piece)
+
+ return new_pieces
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.PieceToId(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.sp_model.IdToPiece(index)
+
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for token in tokens:
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ if not prev_is_special:
+ out_string += " "
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string.strip()
+
+ def _decode(
+ self,
+ token_ids: List[int],
+ skip_special_tokens: bool = False,
+        clean_up_tokenization_spaces: Optional[bool] = None,
+ spaces_between_special_tokens: bool = False,
+ **kwargs,
+ ) -> str:
+ text = super()._decode(
+ token_ids=token_ids,
+ skip_special_tokens=skip_special_tokens,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ spaces_between_special_tokens=spaces_between_special_tokens,
+ **kwargs,
+ )
+ # Mimic the behavior of the Rust tokenizer:
+ # No space after
+ if not spaces_between_special_tokens:
+ text = text.replace(" ", "")
+ return text
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+        and adding special tokens. An FNet sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return cls + token_ids_0 + sep
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. An FNet
+        sequence pair mask has the following format:
+
+        ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+        | first sequence    | second sequence |
+        ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07934141e5a5d58ad5c0312d4ff55c164ee870f0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/configuration_rwkv.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/configuration_rwkv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5e5d47d5e9795b1716c35f379483c1dbf9dd950
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/configuration_rwkv.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/convert_rwkv_checkpoint_to_hf.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/convert_rwkv_checkpoint_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a23bf0d01686f9bb7fdba638b7047acecf2577d0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/convert_rwkv_checkpoint_to_hf.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/modeling_rwkv.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/modeling_rwkv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..791e6e54b0a6bb920c39782b1bc697381bd2419b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/__pycache__/modeling_rwkv.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/convert_rwkv_checkpoint_to_hf.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/convert_rwkv_checkpoint_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..b340b9f028b3d736e4da544a20ecfef9c88e714f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/convert_rwkv_checkpoint_to_hf.py
@@ -0,0 +1,201 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert a RWKV checkpoint from BlinkDL to the Hugging Face format."""
+
+
+import argparse
+import gc
+import json
+import os
+import re
+
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
+from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
+
+
+NUM_HIDDEN_LAYERS_MAPPING = {
+ "169M": 12,
+ "430M": 24,
+ "1B5": 24,
+ "3B": 32,
+ "7B": 32,
+ "14B": 40,
+}
+
+HIDDEN_SIZE_MAPPING = {
+ "169M": 768,
+ "430M": 1024,
+ "1B5": 2048,
+ "3B": 2560,
+ "7B": 4096,
+ "14B": 5120,
+}
+
+
+def convert_state_dict(state_dict):
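+    # Renames BlinkDL-style keys to the Hugging Face layout, e.g. (illustrative):
+    # "blocks.0.att.time_mix_k" -> "rwkv.blocks.0.attention.time_mix_key".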
+ state_dict_keys = list(state_dict.keys())
+ for name in state_dict_keys:
+ weight = state_dict.pop(name)
+ # emb -> embedding
+ if name.startswith("emb."):
+ name = name.replace("emb.", "embeddings.")
+ # ln_0 -> pre_ln (only present at block 0)
+ if name.startswith("blocks.0.ln0"):
+ name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
+ # att -> attention
+ name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
+ # ffn -> feed_forward
+ name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
+        # time_mix_k -> time_mix_key
+        if name.endswith(".time_mix_k"):
+            name = name.replace(".time_mix_k", ".time_mix_key")
+        # time_mix_v -> time_mix_value
+        if name.endswith(".time_mix_v"):
+            name = name.replace(".time_mix_v", ".time_mix_value")
+        # time_mix_r -> time_mix_receptance
+        if name.endswith(".time_mix_r"):
+            name = name.replace(".time_mix_r", ".time_mix_receptance")
+
+ if name != "head.weight":
+ name = "rwkv." + name
+
+ state_dict[name] = weight
+ return state_dict
+
+
+def convert_rwkv_checkpoint_to_hf_format(
+ repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
+):
+ # 1. If possible, build the tokenizer.
+ if tokenizer_file is None:
+ print("No `--tokenizer_file` provided, we will use the default tokenizer.")
+ vocab_size = 50277
+ tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
+ else:
+ tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
+ vocab_size = len(tokenizer)
+ tokenizer.save_pretrained(output_dir)
+
+ # 2. Build the config
+ possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
+ if size is None:
+ # Try to infer size from the checkpoint name
+ for candidate in possible_sizes:
+ if candidate in checkpoint_file:
+ size = candidate
+ break
+ if size is None:
+ raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
+ if size not in possible_sizes:
+ raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
+
+ config = RwkvConfig(
+ vocab_size=vocab_size,
+ num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
+        hidden_size=HIDDEN_SIZE_MAPPING[size],
+ )
+ config.save_pretrained(output_dir)
+
+ # 3. Download model file then convert state_dict
+ model_file = hf_hub_download(repo_id, checkpoint_file)
+ state_dict = torch.load(model_file, map_location="cpu")
+ state_dict = convert_state_dict(state_dict)
+
+ # 4. Split in shards and save
+ shards, index = shard_checkpoint(state_dict)
+ for shard_file, shard in shards.items():
+ torch.save(shard, os.path.join(output_dir, shard_file))
+
+ if index is not None:
+ save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
+ # Save the index as well
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
+    print(
+        "Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model."
+ )
+ shard_files = list(shards.keys())
+
+ del state_dict
+ del shards
+ gc.collect()
+
+ for shard_file in shard_files:
+ state_dict = torch.load(os.path.join(output_dir, shard_file))
+ torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
+
+ del state_dict
+ gc.collect()
+
+ if push_to_hub:
+ if model_name is None:
+ raise ValueError("Please provide a `model_name` to push the model to the Hub.")
+ model = AutoModelForCausalLM.from_pretrained(output_dir)
+ model.push_to_hub(model_name, max_shard_size="2GB")
+ tokenizer.push_to_hub(model_name)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
+ )
+ parser.add_argument(
+ "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
+ )
+ parser.add_argument(
+ "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
+ )
+ parser.add_argument(
+ "--tokenizer_file",
+ default=None,
+ type=str,
+ help="Path to the tokenizer file to use (if not provided, only the model is converted).",
+ )
+ parser.add_argument(
+ "--size",
+ default=None,
+ type=str,
+ help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
+ )
+ parser.add_argument(
+ "--push_to_hub",
+ action="store_true",
+ help="Push to the Hub the converted model.",
+ )
+ parser.add_argument(
+ "--model_name",
+ default=None,
+ type=str,
+ help="Name of the pushed model on the Hub, including the username / organization.",
+ )
+
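+ # Example invocation (illustrative only; the script path, repo id and checkpoint name below are placeholders):
+ #
+ #   python <path_to_this_script> --repo_id <hub_repo_with_rwkv_weights> \
+ #       --checkpoint_file <checkpoint>.pth --output_dir ./rwkv-converted
+ #
+ # --size is inferred from the checkpoint file name when possible, and --tokenizer_file is optional.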
+ args = parser.parse_args()
+ convert_rwkv_checkpoint_to_hf_format(
+ args.repo_id,
+ args.checkpoint_file,
+ args.output_dir,
+ size=args.size,
+ tokenizer_file=args.tokenizer_file,
+ push_to_hub=args.push_to_hub,
+ model_name=args.model_name,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/modeling_rwkv.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/modeling_rwkv.py
new file mode 100644
index 0000000000000000000000000000000000000000..79e06d141bb846498cd291c8c91455919da9a4b7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/rwkv/modeling_rwkv.py
@@ -0,0 +1,862 @@
+# coding=utf-8
+# Copyright 2023 Bo Peng and HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch RWKV model."""
+
+import math
+from dataclasses import dataclass
+from pathlib import Path
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_bitsandbytes_available,
+ is_ninja_available,
+ is_torch_cuda_available,
+ logging,
+)
+from .configuration_rwkv import RwkvConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "RWKV/rwkv-4-169m-pile"
+_CONFIG_FOR_DOC = "RwkvConfig"
+
+
+from ..deprecated._archive_maps import RWKV_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+rwkv_cuda_kernel = None
+
+
+def load_wkv_cuda_kernel(context_length):
+ from torch.utils.cpp_extension import load as load_kernel
+
+ global rwkv_cuda_kernel
+
+ kernel_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "rwkv"
+ cuda_kernel_files = [kernel_folder / f for f in ["wkv_op.cpp", "wkv_cuda.cu", "wkv_cuda_bf16.cu"]]
+
+ # Only load the kernel if it's not been loaded yet or if we changed the context length
+ if rwkv_cuda_kernel is not None and rwkv_cuda_kernel.max_seq_length == context_length:
+ return
+
+ logger.info(f"Loading CUDA kernel for RWKV at context length of {context_length}.")
+
+ flags = [
+ "-res-usage",
+ "--maxrregcount 60",
+ "--use_fast_math",
+ "-O3",
+ "-Xptxas -O3",
+ "--extra-device-vectorization",
+ f"-DTmax={context_length}",
+ ]
+ rwkv_cuda_kernel = load_kernel(
+ name=f"wkv_{context_length}",
+ sources=cuda_kernel_files,
+ verbose=(logging.get_verbosity() == logging.DEBUG),
+ extra_cuda_cflags=flags,
+ )
+ rwkv_cuda_kernel.max_seq_length = context_length
+
+
+class RwkvLinearAttention(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, time_decay, time_first, key, value, state=None, return_state=False):
+ batch_size, seq_len, hidden_size = key.size()
+ if seq_len > rwkv_cuda_kernel.max_seq_length:
+ raise ValueError(
+ f"Cannot process a batch with {seq_len} tokens at the same time, use a maximum of "
+ f"{rwkv_cuda_kernel.max_seq_length} with this model."
+ )
+ if batch_size * hidden_size % min(hidden_size, 32) != 0:
+ raise ValueError(
+ f"The product of batch size ({batch_size}) and hidden size ({hidden_size}) needs to be a round "
+ f"multiple of {min(hidden_size, 32)}."
+ )
+
+ ctx.input_dtype = key.dtype
+
+ if (
+ time_decay.device.type != "cuda"
+ or time_first.device.type != "cuda"
+ or key.device.type != "cuda"
+ or value.device.type != "cuda"
+ ):
+ raise ValueError("Calling the CUDA kernel for wkv attention requires all tensors to be on CUDA devices.")
+
+ time_decay = -torch.exp(time_decay.float().contiguous())
+ if key.dtype == torch.float16:
+ time_first = time_first.float()
+ key = key.float()
+ value = value.float()
+ time_first = time_first.contiguous()
+ key = key.contiguous()
+ value = value.contiguous()
+ # The CUDA kernel will fill this tensor.
+ output = torch.empty_like(key, memory_format=torch.contiguous_format)
+ if return_state or state is not None:
+ if state is None:
+ state = torch.zeros(
+ batch_size,
+ hidden_size,
+ 3,
+ dtype=torch.float32,
+ device=key.device,
+ memory_format=torch.contiguous_format,
+ )
+ state[:, :, 2] -= 1e38
+ else:
+ state = torch.cat([s.unsqueeze(2) for s in state], dim=2).contiguous()
+ if key.dtype == torch.bfloat16:
+ forward_func = rwkv_cuda_kernel.forward_with_state_bf16
+ else:
+ forward_func = rwkv_cuda_kernel.forward_with_state
+ forward_func(time_decay, time_first, key, value, output, state)
+ else:
+ forward_func = rwkv_cuda_kernel.forward_bf16 if key.dtype == torch.bfloat16 else rwkv_cuda_kernel.forward
+ forward_func(time_decay, time_first, key, value, output)
+
+ ctx.save_for_backward(time_decay, time_first, key, value, output)
+
+ if state is not None:
+ state = [s.squeeze(2) for s in torch.chunk(state, 3, dim=2)]
+
+ return output.to(ctx.input_dtype), state
+
+ @staticmethod
+ # g stands for grad
+ def backward(ctx, g_output, g_state=None):
+ input_dtype = ctx.input_dtype
+
+ time_decay, time_first, key, value, output = ctx.saved_tensors
+ # The CUDA kernel will fill those tensors.
+ g_time_decay = torch.empty_like(
+ time_decay,
+ memory_format=torch.contiguous_format,
+ dtype=torch.bfloat16 if input_dtype == torch.bfloat16 else torch.float32,
+ )
+ g_time_first = torch.empty_like(time_first, memory_format=torch.contiguous_format)
+ g_key = torch.empty_like(key, memory_format=torch.contiguous_format)
+ g_value = torch.empty_like(value, memory_format=torch.contiguous_format)
+
+ if input_dtype == torch.float16:
+ g_output = g_output.float()
+ backward_func = rwkv_cuda_kernel.backward_bf16 if input_dtype == torch.bfloat16 else rwkv_cuda_kernel.backward
+ backward_func(
+ time_decay,
+ time_first,
+ key,
+ value,
+ output,
+ g_output.contiguous(),
+ g_time_decay,
+ g_time_first,
+ g_key,
+ g_value,
+ )
+
+ return (
+ g_time_decay.to(input_dtype),
+ g_time_first.to(input_dtype),
+ g_key.to(input_dtype),
+ g_value.to(input_dtype),
+ None,
+ None,
+ )
+
+
+def rwkv_linear_attention_cpu(time_decay, time_first, key, value, state=None, return_state=False):
+ # CPU fallback. It will be slower and probably use more memory than the custom CUDA kernel if not executed
+ # within a torch.no_grad() context.
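+ # The loop below evaluates the wkv recurrence (illustrative notation):
+ #     wkv_t = (sum_{i < t} exp(k_i + (t - 1 - i) * w) * v_i + exp(u + k_t) * v_t)
+ #             / (sum_{i < t} exp(k_i + (t - 1 - i) * w) + exp(u + k_t))
+ # with w = -exp(time_decay) and u = time_first, carried through running numerator/denominator accumulators
+ # plus a running maximum so that the exponentials stay in a numerically safe range.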
+ _, seq_length, _ = key.size()
+ output = torch.zeros_like(key)
+
+ if state is None:
+ num_state = torch.zeros_like(key[:, 0], dtype=torch.float32)
+ den_state = torch.zeros_like(key[:, 0], dtype=torch.float32)
+ max_state = torch.zeros_like(key[:, 0], dtype=torch.float32) - 1e38
+ else:
+ num_state, den_state, max_state = state
+ # For numerical stability
+ # real_numerator_state = num_state * torch.exp(max_state)
+ # real_denominator_state = den_state * torch.exp(max_state)
+
+ time_decay = -torch.exp(time_decay)
+
+ for current_index in range(seq_length):
+ current_key = key[:, current_index].float()
+ current_value = value[:, current_index]
+
+ # wkv computation at time t
+ max_for_output = torch.maximum(max_state, current_key + time_first)
+ e1 = torch.exp(max_state - max_for_output)
+ e2 = torch.exp(current_key + time_first - max_for_output)
+ numerator = e1 * num_state + e2 * current_value
+ denominator = e1 * den_state + e2
+ output[:, current_index] = (numerator / denominator).to(output.dtype)
+
+ # Update state for next iteration
+ max_for_state = torch.maximum(max_state + time_decay, current_key)
+ e1 = torch.exp(max_state + time_decay - max_for_state)
+ e2 = torch.exp(current_key - max_for_state)
+ num_state = e1 * num_state + e2 * current_value
+ den_state = e1 * den_state + e2
+ max_state = max_for_state
+
+ if return_state or state is not None:
+ state = [num_state, den_state, max_state]
+
+ return output, state
+
+
+def rwkv_linear_attention(time_decay, time_first, key, value, state=None, return_state=False):
+ no_cuda = any(t.device.type != "cuda" for t in [time_decay, time_first, key, value])
+ # Launching the CUDA kernel for just one token will actually be slower (there is no for loop in the CPU version
+ # in this case).
+ one_token = key.size(1) == 1
+ if rwkv_cuda_kernel is None or no_cuda or one_token:
+ return rwkv_linear_attention_cpu(time_decay, time_first, key, value, state=state, return_state=return_state)
+ else:
+ return RwkvLinearAttention.apply(time_decay, time_first, key, value, state, return_state)
+
+
+class RwkvSelfAttention(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.config = config
+ kernel_loaded = rwkv_cuda_kernel is not None and rwkv_cuda_kernel.max_seq_length == config.context_length
+ if is_ninja_available() and is_torch_cuda_available() and not kernel_loaded:
+ try:
+ load_wkv_cuda_kernel(config.context_length)
+ except Exception:
+ logger.info("Could not load the custom CUDA kernel for RWKV attention.")
+ self.layer_id = layer_id
+ hidden_size = config.hidden_size
+ attention_hidden_size = (
+ config.attention_hidden_size if config.attention_hidden_size is not None else hidden_size
+ )
+ self.attention_hidden_size = attention_hidden_size
+
+ self.time_decay = nn.Parameter(torch.empty(attention_hidden_size))
+ self.time_first = nn.Parameter(torch.empty(attention_hidden_size))
+
+ self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size))
+ self.time_mix_value = nn.Parameter(torch.empty(1, 1, hidden_size))
+ self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size))
+
+ self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
+ self.key = nn.Linear(hidden_size, attention_hidden_size, bias=False)
+ self.value = nn.Linear(hidden_size, attention_hidden_size, bias=False)
+ self.receptance = nn.Linear(hidden_size, attention_hidden_size, bias=False)
+ self.output = nn.Linear(attention_hidden_size, hidden_size, bias=False)
+
+ # TODO: maybe jit, otherwise move inside forward
+ def extract_key_value(self, hidden, state=None):
+ # Mix hidden with the previous timestep to produce key, value, receptance
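+ # `time_shift` (nn.ZeroPad2d((0, 0, 1, -1))) shifts the sequence one position to the right, so each token
+ # is mixed with the hidden state of the token before it (zeros, or the cached state, at position 0).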
+ if hidden.size(1) == 1 and state is not None:
+ shifted = state[1][:, :, self.layer_id]
+ else:
+ shifted = self.time_shift(hidden)
+ if state is not None:
+ shifted[:, 0] = state[1][:, :, self.layer_id]
+ key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key)
+ value = hidden * self.time_mix_value + shifted * (1 - self.time_mix_value)
+ receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance)
+
+ key = self.key(key)
+ value = self.value(value)
+ receptance = torch.sigmoid(self.receptance(receptance))
+ if state is not None:
+ state[1][:, :, self.layer_id] = hidden[:, -1]
+ return receptance, key, value, state
+
+ def forward(self, hidden, state=None, use_cache=False):
+ receptance, key, value, state = self.extract_key_value(hidden, state=state)
+ layer_state = tuple(s[:, :, self.layer_id] for s in state[2:]) if state is not None else None
+ rwkv, layer_state = rwkv_linear_attention(
+ self.time_decay,
+ self.time_first,
+ key,
+ value,
+ state=layer_state,
+ return_state=use_cache,
+ )
+
+ if layer_state is not None:
+ state[2][:, :, self.layer_id] = layer_state[0]
+ state[3][:, :, self.layer_id] = layer_state[1]
+ state[4][:, :, self.layer_id] = layer_state[2]
+
+ return self.output(receptance * rwkv), state
+
+
+class RwkvFeedForward(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.config = config
+ self.layer_id = layer_id
+ hidden_size = config.hidden_size
+ intermediate_size = (
+ config.intermediate_size if config.intermediate_size is not None else 4 * config.hidden_size
+ )
+
+ self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
+ self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size))
+ self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size))
+
+ self.key = nn.Linear(hidden_size, intermediate_size, bias=False)
+ self.receptance = nn.Linear(hidden_size, hidden_size, bias=False)
+ self.value = nn.Linear(intermediate_size, hidden_size, bias=False)
+
+ def forward(self, hidden, state=None):
+ if hidden.size(1) == 1 and state is not None:
+ shifted = state[0][:, :, self.layer_id]
+ else:
+ shifted = self.time_shift(hidden)
+ if state is not None:
+ shifted[:, 0] = state[0][:, :, self.layer_id]
+ key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key)
+ receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance)
+
+ key = torch.square(torch.relu(self.key(key)))
+ value = self.value(key)
+ receptance = torch.sigmoid(self.receptance(receptance))
+
+ if state is not None:
+ state[0][:, :, self.layer_id] = hidden[:, -1]
+
+ return receptance * value, state
+
+
+class RwkvBlock(nn.Module):
+ def __init__(self, config, layer_id):
+ super().__init__()
+ self.config = config
+ self.layer_id = layer_id
+
+ if layer_id == 0:
+ self.pre_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
+
+ self.ln1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
+ self.ln2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
+
+ self.attention = RwkvSelfAttention(config, layer_id)
+ self.feed_forward = RwkvFeedForward(config, layer_id)
+
+ def forward(self, hidden, state=None, use_cache=False, output_attentions=False):
+ if self.layer_id == 0:
+ hidden = self.pre_ln(hidden)
+
+ attention, state = self.attention(self.ln1(hidden), state=state, use_cache=use_cache)
+ hidden = hidden + attention
+
+ feed_forward, state = self.feed_forward(self.ln2(hidden), state=state)
+ hidden = hidden + feed_forward
+
+ outputs = (hidden, state)
+ if output_attentions:
+ outputs += (attention,)
+ else:
+ outputs += (None,)
+
+ return outputs
+
+
+class RwkvPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = RwkvConfig
+ base_model_prefix = "rwkv"
+ _no_split_modules = ["RwkvBlock"]
+ _keep_in_fp32_modules = ["time_decay", "time_first"]
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, RwkvSelfAttention):
+ layer_id = module.layer_id
+ num_hidden_layers = module.config.num_hidden_layers
+ hidden_size = module.config.hidden_size
+ attention_hidden_size = module.attention_hidden_size
+
+ ratio_0_to_1 = layer_id / (num_hidden_layers - 1) # 0 to 1
+ ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0
+
+ time_weight = torch.tensor(
+ [i / hidden_size for i in range(hidden_size)],
+ dtype=module.time_mix_key.dtype,
+ device=module.time_mix_key.device,
+ )
+ time_weight = time_weight[None, None, :]
+
+ decay_speed = [
+ -5 + 8 * (h / (attention_hidden_size - 1)) ** (0.7 + 1.3 * ratio_0_to_1)
+ for h in range(attention_hidden_size)
+ ]
+ decay_speed = torch.tensor(decay_speed, dtype=module.time_decay.dtype, device=module.time_decay.device)
+ zigzag = (
+ torch.tensor(
+ [(i + 1) % 3 - 1 for i in range(attention_hidden_size)],
+ dtype=module.time_first.dtype,
+ device=module.time_first.device,
+ )
+ * 0.5
+ )
+
+ with torch.no_grad():
+ module.time_decay.data = decay_speed
+ module.time_first.data = torch.ones_like(module.time_first) * math.log(0.3) + zigzag
+
+ module.time_mix_key.data = torch.pow(time_weight, ratio_1_to_almost0)
+ module.time_mix_value.data = torch.pow(time_weight, ratio_1_to_almost0) + 0.3 * ratio_0_to_1
+ module.time_mix_receptance.data = torch.pow(time_weight, 0.5 * ratio_1_to_almost0)
+ elif isinstance(module, RwkvFeedForward):
+ layer_id = module.layer_id
+ num_hidden_layers = module.config.num_hidden_layers
+ hidden_size = module.config.hidden_size
+
+ ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0
+
+ time_weight = torch.tensor(
+ [i / hidden_size for i in range(hidden_size)],
+ dtype=module.time_mix_key.dtype,
+ device=module.time_mix_key.device,
+ )
+ time_weight = time_weight[None, None, :]
+
+ with torch.no_grad():
+ module.time_mix_key.data = torch.pow(time_weight, ratio_1_to_almost0)
+ module.time_mix_receptance.data = torch.pow(time_weight, ratio_1_to_almost0)
+
+
+@dataclass
+class RwkvOutput(ModelOutput):
+ """
+ Class for the RWKV model outputs.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`):
+ The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
+ avoid providing the old `input_ids`.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ state: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class RwkvCausalLMOutput(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`):
+ The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
+ avoid providing the old `input_ids`.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ state: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+RWKV_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`RwkvConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+RWKV_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
+ `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
+ sequence tokens in the vocabulary.
+
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
+ `input_ids`.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ This is currently not used by `RwkvModel`, but will be supported in the future.
+
+ [What are attention masks?](../glossary#attention-mask)
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ state (tuple of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`, *optional*):
+ If passed along, the model uses the previous state in all the blocks (which will give the output for the
+ `input_ids` provided as if the model added `state_input_ids + input_ids` as context).
+ use_cache (`bool`, *optional*):
+ If set to `True`, the last state is returned and can be used to quickly generate the next logits.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare RWKV Model transformer outputting raw hidden-states without any specific head on top.",
+ RWKV_START_DOCSTRING,
+)
+class RwkvModel(RwkvPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
+ self.blocks = nn.ModuleList([RwkvBlock(config, layer_id=idx) for idx in range(config.num_hidden_layers)])
+ self.ln_out = nn.LayerNorm(config.hidden_size)
+
+ self.layers_are_rescaled = False
+
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings
+
+ def set_input_embeddings(self, new_embeddings):
+ self.embeddings = new_embeddings
+
+ @add_start_docstrings_to_model_forward(RWKV_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=RwkvOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None, # noqa
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ state: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, RwkvOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.training == self.layers_are_rescaled:
+ self._rescale_layers()
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is None and inputs_embeds is None:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embeddings(input_ids)
+
+ if use_cache and state is None:
+ shape = (inputs_embeds.size(0), self.config.hidden_size, self.config.num_hidden_layers)
+ state = [
+ torch.zeros(
+ *shape, dtype=inputs_embeds.dtype if i <= 1 else torch.float32, device=inputs_embeds.device
+ )
+ for i in range(5)
+ ]
+ state[4] -= 1e30
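+ # Layout of the cached state (see RwkvFeedForward, RwkvSelfAttention and rwkv_linear_attention_cpu above):
+ # state[0] and state[1] hold the last hidden state used for the feed-forward and attention token shifts,
+ # and state[2:5] hold the (numerator, denominator, max) accumulators of the wkv recurrence.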
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ hidden_states = inputs_embeds
+
+ all_self_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ for idx, block in enumerate(self.blocks):
+ if self.gradient_checkpointing and self.training:
+ hidden_states, state, attentions = self._gradient_checkpointing_func(
+ block.__call__, hidden_states, state, use_cache, output_attentions
+ )
+ else:
+ hidden_states, state, attentions = block(
+ hidden_states, state=state, use_cache=use_cache, output_attentions=output_attentions
+ )
+
+ if (
+ self.layers_are_rescaled
+ and self.config.rescale_every > 0
+ and (idx + 1) % self.config.rescale_every == 0
+ ):
+ hidden_states = hidden_states / 2
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (attentions,)
+
+ hidden_states = self.ln_out(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(x for x in [hidden_states, state, all_hidden_states, all_self_attentions] if x is not None)
+
+ return RwkvOutput(
+ last_hidden_state=hidden_states,
+ state=state,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+ def _rescale_layers(self):
+ # Layers should be rescaled for inference only.
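+ # For example, with `config.rescale_every = 2`, the `attention.output` and `feed_forward.value` weights of
+ # block 5 are divided by 2**2 for inference, and multiplied back by the same factor when training resumes.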
+ if self.layers_are_rescaled == (not self.training):
+ return
+ if self.config.rescale_every > 0:
+ with torch.no_grad():
+ for block_id, block in enumerate(self.blocks):
+ if self.training:
+ block.attention.output.weight.mul_(2 ** int(block_id // self.config.rescale_every))
+ block.feed_forward.value.weight.mul_(2 ** int(block_id // self.config.rescale_every))
+ else:
+ # Deal with quantization statistics
+ if hasattr(block.attention.output.weight, "SCB"):
+ block.attention.output.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))
+ block.feed_forward.value.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))
+ elif hasattr(block.attention.output.weight, "quant_state"):
+ self._bnb_4bit_dequantize_and_rescale(block.attention.output, block_id)
+ self._bnb_4bit_dequantize_and_rescale(block.feed_forward.value, block_id)
+ else:
+ block.attention.output.weight.div_(2 ** int(block_id // self.config.rescale_every))
+ block.feed_forward.value.weight.div_(2 ** int(block_id // self.config.rescale_every))
+
+ self.layers_are_rescaled = not self.training
+
+ def _bnb_4bit_dequantize_and_rescale(self, target_layer, block_id):
+ r"""
+ Perform the dequantization and rescaling of the weights of a given layer. After that operation the layer will
+ be quantized again.
+ """
+ if not is_bitsandbytes_available():
+ raise ImportError("Please install bitsandbytes to use this method.")
+ import bitsandbytes as bnb
+
+ dequant_weights = bnb.functional.dequantize_4bit(target_layer.weight.data, target_layer.weight.quant_state)
+
+ dequant_weights.div_(2 ** int(block_id // self.config.rescale_every))
+
+ # re-quantize the model:
+ # we need to put it first on CPU then back to the device
+ # this will create an overhead :/
+ # We set requires_grad=False as we cannot compute gradients on top of 4bit parameters anyway and to avoid
+ # bugs with bnb
+ quant_weight = bnb.nn.Params4bit(dequant_weights.to("cpu"), requires_grad=False).to(dequant_weights.device)
+ setattr(target_layer, "weight", quant_weight)
+
+
+@add_start_docstrings(
+ """
+ The RWKV Model transformer with a language modeling head on top (linear layer with weights tied to the input
+ embeddings).
+ """,
+ RWKV_START_DOCSTRING,
+)
+class RwkvForCausalLM(RwkvPreTrainedModel):
+ _tied_weights_keys = ["head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.rwkv = RwkvModel(config)
+ self.head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.head = new_embeddings
+
+ def generate(self, *args, **kwargs):
+ # Thin wrapper to raise exceptions when trying to generate with methods that manipulate `past_key_values`.
+ # RWKV is one of the few models that don't have it (it has `state` instead, which has different properties and
+ # usage).
+ try:
+ gen_output = super().generate(*args, **kwargs)
+ except AttributeError as exc:
+ # Expected exception: "AttributeError: '(object name)' object has no attribute 'past_key_values'"
+ if "past_key_values" in str(exc):
+ raise AttributeError(
+ "You tried to call `generate` with a decoding strategy that manipulates `past_key_values`. RWKV "
+ "doesn't have that attribute, try another generation strategy instead. For the available "
+ "generation strategies, check this doc: https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
+ )
+ else:
+ raise exc
+ return gen_output
+
+ def prepare_inputs_for_generation(self, input_ids, state=None, inputs_embeds=None, **kwargs):
+ # Only use the last token for `input_ids` if a state is passed along.
+ if state is not None:
+ input_ids = input_ids[:, -1].unsqueeze(-1)
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and state is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs["state"] = state
+ return model_inputs
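+
+ # A minimal manual decoding sketch relying on the state cache (illustrative only; the checkpoint name is the
+ # one used in `_CHECKPOINT_FOR_DOC` above):
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-4-169m-pile")
+ #     model = RwkvForCausalLM.from_pretrained("RWKV/rwkv-4-169m-pile")
+ #     outputs = model(**tokenizer("Hello", return_tensors="pt"), use_cache=True)
+ #     next_token = outputs.logits[:, -1].argmax(-1, keepdim=True)
+ #     # on the next step, only the newly sampled token and the returned state are needed
+ #     outputs = model(input_ids=next_token, state=outputs.state, use_cache=True)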
+
+ @add_start_docstrings_to_model_forward(RWKV_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=RwkvCausalLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None, # noqa
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ state: Optional[List[torch.FloatTensor]] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, RwkvCausalLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ rwkv_outputs = self.rwkv(
+ input_ids,
+ inputs_embeds=inputs_embeds,
+ state=state,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = rwkv_outputs[0]
+
+ logits = self.head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + rwkv_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return RwkvCausalLMOutput(
+ loss=loss,
+ logits=logits,
+ state=rwkv_outputs.state,
+ hidden_states=rwkv_outputs.hidden_states,
+ attentions=rwkv_outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f777a11ad1bdcf5403f06cad65fdce320b1c3d9d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__init__.py
@@ -0,0 +1,55 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_timesformer"] = [
+ "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TimesformerModel",
+ "TimesformerForVideoClassification",
+ "TimesformerPreTrainedModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_timesformer import (
+ TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TimesformerForVideoClassification,
+ TimesformerModel,
+ TimesformerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9955ba349ff9d6d71255e4b71efa14138a1956a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/configuration_timesformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/configuration_timesformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02a088fe98394f7b2f8afb9b126bf54792ab6078
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/configuration_timesformer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/convert_timesformer_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/convert_timesformer_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7bdad6c048c8779ea5c862d79b18898f2e174ab
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/convert_timesformer_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/modeling_timesformer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/modeling_timesformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfcca3e33f4ad95c01f5bb7209c5a5c8d5d17842
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/__pycache__/modeling_timesformer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/configuration_timesformer.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/configuration_timesformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..79a86b7b5b370d84d176a4eb1d7d890178021e61
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/configuration_timesformer.py
@@ -0,0 +1,129 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TimeSformer model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TimesformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`TimesformerModel`]. It is used to instantiate a
+ TimeSformer model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the TimeSformer
+ [facebook/timesformer-base-finetuned-k600](https://huggingface.co/facebook/timesformer-base-finetuned-k600)
+ architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ num_frames (`int`, *optional*, defaults to 8):
+ The number of frames in each video.
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ attention_type (`str`, *optional*, defaults to `"divided_space_time"`):
+ The attention type to use. Must be one of `"divided_space_time"`, `"space_only"`, `"joint_space_time"`.
+ drop_path_rate (`float`, *optional*, defaults to 0):
+ The dropout ratio for stochastic depth.
+
+ Example:
+
+ ```python
+ >>> from transformers import TimesformerConfig, TimesformerModel
+
+ >>> # Initializing a TimeSformer timesformer-base style configuration
+ >>> configuration = TimesformerConfig()
+
+ >>> # Initializing a model from the configuration
+ >>> model = TimesformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "timesformer"
+
+ def __init__(
+ self,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ num_frames=8,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-6,
+ qkv_bias=True,
+ attention_type="divided_space_time",
+ drop_path_rate=0,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_frames = num_frames
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.qkv_bias = qkv_bias
+
+ self.attention_type = attention_type
+ self.drop_path_rate = drop_path_rate
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/convert_timesformer_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/convert_timesformer_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce4d13421ffddac5080420134fe2f342827a7c06
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/convert_timesformer_to_pytorch.py
@@ -0,0 +1,253 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert TimeSformer checkpoints from the original repository: https://github.com/MCG-NJU/TimeSformer"""
+
+import argparse
+import json
+
+import gdown
+import numpy as np
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import TimesformerConfig, TimesformerForVideoClassification, VideoMAEImageProcessor
+
+
+def get_timesformer_config(model_name):
+ config = TimesformerConfig()
+
+ if "large" in model_name:
+ config.num_frames = 96
+
+ if "hr" in model_name:
+ config.num_frames = 16
+ config.image_size = 448
+
+ repo_id = "huggingface/label-files"
+ if "k400" in model_name:
+ config.num_labels = 400
+ filename = "kinetics400-id2label.json"
+ elif "k600" in model_name:
+ config.num_labels = 600
+ filename = "kinetics600-id2label.json"
+ elif "ssv2" in model_name:
+ config.num_labels = 174
+ filename = "something-something-v2-id2label.json"
+ else:
+ raise ValueError("Model name should either contain 'k400', 'k600' or 'ssv2'.")
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+
+ return config
+
+
+def rename_key(name):
+ if "encoder." in name:
+ name = name.replace("encoder.", "")
+ if "cls_token" in name:
+ name = name.replace("cls_token", "timesformer.embeddings.cls_token")
+ if "pos_embed" in name:
+ name = name.replace("pos_embed", "timesformer.embeddings.position_embeddings")
+ if "time_embed" in name:
+ name = name.replace("time_embed", "timesformer.embeddings.time_embeddings")
+ if "patch_embed.proj" in name:
+ name = name.replace("patch_embed.proj", "timesformer.embeddings.patch_embeddings.projection")
+ if "patch_embed.norm" in name:
+ name = name.replace("patch_embed.norm", "timesformer.embeddings.norm")
+ if "blocks" in name:
+ name = name.replace("blocks", "timesformer.encoder.layer")
+ if "attn.proj" in name:
+ name = name.replace("attn.proj", "attention.output.dense")
+ if "attn" in name and "bias" not in name and "temporal" not in name:
+ name = name.replace("attn", "attention.self")
+ if "attn" in name and "temporal" not in name:
+ name = name.replace("attn", "attention.attention")
+ if "temporal_norm1" in name:
+ name = name.replace("temporal_norm1", "temporal_layernorm")
+ if "temporal_attn.proj" in name:
+ name = name.replace("temporal_attn", "temporal_attention.output.dense")
+ if "temporal_fc" in name:
+ name = name.replace("temporal_fc", "temporal_dense")
+ if "norm1" in name and "temporal" not in name:
+ name = name.replace("norm1", "layernorm_before")
+ if "norm2" in name:
+ name = name.replace("norm2", "layernorm_after")
+ if "mlp.fc1" in name:
+ name = name.replace("mlp.fc1", "intermediate.dense")
+ if "mlp.fc2" in name:
+ name = name.replace("mlp.fc2", "output.dense")
+ if "norm.weight" in name and "fc" not in name and "temporal" not in name:
+ name = name.replace("norm.weight", "timesformer.layernorm.weight")
+ if "norm.bias" in name and "fc" not in name and "temporal" not in name:
+ name = name.replace("norm.bias", "timesformer.layernorm.bias")
+ if "head" in name:
+ name = name.replace("head", "classifier")
+
+ return name
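+
+
+# For illustration (not part of the original script): rename_key maps a key like "blocks.0.attn.proj.weight"
+# to "timesformer.encoder.layer.0.attention.output.dense.weight", and "head.weight" to "classifier.weight".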
+
+
+def convert_state_dict(orig_state_dict, config):
+ for key in orig_state_dict.copy().keys():
+ val = orig_state_dict.pop(key)
+
+ if key.startswith("model."):
+ key = key.replace("model.", "")
+
+ if "qkv" in key:
+ key_split = key.split(".")
+ layer_num = int(key_split[1])
+ prefix = "timesformer.encoder.layer."
+ if "temporal" in key:
+ postfix = ".temporal_attention.attention.qkv."
+ else:
+ postfix = ".attention.attention.qkv."
+ if "weight" in key:
+ orig_state_dict[f"{prefix}{layer_num}{postfix}weight"] = val
+ else:
+ orig_state_dict[f"{prefix}{layer_num}{postfix}bias"] = val
+ else:
+ orig_state_dict[rename_key(key)] = val
+
+ return orig_state_dict
+
+
+# We will verify our results on a video of eating spaghetti
+# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227]
+def prepare_video():
+ file = hf_hub_download(
+ repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
+ )
+ video = np.load(file)
+ return list(video)
+
+
+def convert_timesformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
+ config = get_timesformer_config(model_name)
+
+ model = TimesformerForVideoClassification(config)
+
+ # download original checkpoint, hosted on Google Drive
+ output = "pytorch_model.bin"
+ gdown.cached_download(checkpoint_url, output, quiet=False)
+ files = torch.load(output, map_location="cpu")
+ if "model" in files:
+ state_dict = files["model"]
+ elif "module" in files:
+ state_dict = files["module"]
+ else:
+ state_dict = files["model_state"]
+ new_state_dict = convert_state_dict(state_dict, config)
+
+ model.load_state_dict(new_state_dict)
+ model.eval()
+
+ # verify model on basic input
+ image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
+ video = prepare_video()
+ inputs = image_processor(video[:8], return_tensors="pt")
+
+ outputs = model(**inputs)
+ logits = outputs.logits
+
+ model_names = [
+ # Kinetics-400 checkpoints (hr = high resolution input of 448px instead of 224px)
+ "timesformer-base-finetuned-k400",
+ "timesformer-large-finetuned-k400",
+ "timesformer-hr-finetuned-k400",
+ # Kinetics-600 checkpoints (hr = high resolution input of 448px instead of 224px)
+ "timesformer-base-finetuned-k600",
+ "timesformer-large-finetuned-k600",
+ "timesformer-hr-finetuned-k600",
+ # Something-Something-v2 checkpoints (hr = high resolution input of 448px instead of 224px)
+ "timesformer-base-finetuned-ssv2",
+ "timesformer-large-finetuned-ssv2",
+ "timesformer-hr-finetuned-ssv2",
+ ]
+
+ # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
+ if model_name == "timesformer-base-finetuned-k400":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205])
+ elif model_name == "timesformer-base-finetuned-k600":
+ expected_shape = torch.Size([1, 600])
+ expected_slice = torch.tensor([-0.7267, -0.7466, 3.2404])
+ elif model_name == "timesformer-base-finetuned-ssv2":
+ expected_shape = torch.Size([1, 174])
+ expected_slice = torch.tensor([-0.9059, 0.6433, -3.1457])
+ elif model_name == "timesformer-large-finetuned-k400":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([0, 0, 0])
+ elif model_name == "timesformer-large-finetuned-k600":
+ expected_shape = torch.Size([1, 600])
+ expected_slice = torch.tensor([0, 0, 0])
+ elif model_name == "timesformer-large-finetuned-ssv2":
+ expected_shape = torch.Size([1, 174])
+ expected_slice = torch.tensor([0, 0, 0])
+ elif model_name == "timesformer-hr-finetuned-k400":
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([-0.9617, -3.7311, -3.7708])
+ elif model_name == "timesformer-hr-finetuned-k600":
+ expected_shape = torch.Size([1, 600])
+ expected_slice = torch.tensor([2.5273, 0.7127, 1.8848])
+ elif model_name == "timesformer-hr-finetuned-ssv2":
+ expected_shape = torch.Size([1, 174])
+ expected_slice = torch.tensor([-3.6756, -0.7513, 0.7180])
+ else:
+ raise ValueError(f"Model name not supported. Should be one of {model_names}")
+
+ # verify logits
+ assert logits.shape == expected_shape
+ assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
+ print("Logits ok!")
+
+ if pytorch_dump_folder_path is not None:
+ print(f"Saving model and image processor to {pytorch_dump_folder_path}")
+ image_processor.save_pretrained(pytorch_dump_folder_path)
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ print("Pushing to the hub...")
+ model.push_to_hub(f"fcakyon/{model_name}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://drive.google.com/u/1/uc?id=17yvuYp9L4mn-HpIcK5Zo6K3UoOy1kA5l&export=download",
+ type=str,
+ help=(
+ "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
+ " download link."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default="",
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument("--model_name", default="timesformer-base-finetuned-k400", type=str, help="Name of the model.")
+ parser.add_argument(
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ )
+
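+ # Example invocation (illustrative; the checkpoint URL placeholder must be a direct Google Drive download link):
+ #
+ #   python convert_timesformer_to_pytorch.py --checkpoint_url "<direct_gdrive_link>" \
+ #       --pytorch_dump_folder_path ./timesformer-base-finetuned-k400 --model_name timesformer-base-finetuned-k400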
+ args = parser.parse_args()
+ convert_timesformer_checkpoint(
+ args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/modeling_timesformer.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/modeling_timesformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..337447250842ee0cb0c83e66041d64e1ff04880d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/timesformer/modeling_timesformer.py
@@ -0,0 +1,816 @@
+# coding=utf-8
+# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch TimeSformer model."""
+
+
+import collections
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.nn.functional
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_timesformer import TimesformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "TimesformerConfig"
+_CHECKPOINT_FOR_DOC = "facebook/timesformer"
+
+
+from ..deprecated._archive_maps import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L155
+class TimesformerPatchEmbeddings(nn.Module):
+ """Image to Patch Embedding"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ image_size = config.image_size
+ patch_size = config.patch_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values):
+ batch_size, num_frames, num_channels, height, width = pixel_values.shape
+ pixel_values = pixel_values.reshape(batch_size * num_frames, num_channels, height, width)
+
+ embeddings = self.projection(pixel_values)
+ patch_width = embeddings.size(-1)
+ embeddings = embeddings.flatten(2).transpose(1, 2)
+ return embeddings, num_frames, patch_width
+
+
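As a quick shape sketch (assuming the default `TimesformerConfig`: 224x224 frames, 16x16 patches, 8 frames, hidden size 768), the patch embedding folds the frame dimension into the batch and turns each frame into a 14x14 grid of patch tokens:

```python
import torch

from transformers import TimesformerConfig
from transformers.models.timesformer.modeling_timesformer import TimesformerPatchEmbeddings

# Assumes the default config: 224x224 frames, 16x16 patches, 8 frames, hidden size 768.
config = TimesformerConfig()
patch_embed = TimesformerPatchEmbeddings(config)

# (batch_size, num_frames, num_channels, height, width)
pixel_values = torch.randn(2, config.num_frames, config.num_channels, config.image_size, config.image_size)
embeddings, num_frames, patch_width = patch_embed(pixel_values)

# Frames are folded into the batch dimension: (2 * 8, 14 * 14, 768).
print(embeddings.shape)  # torch.Size([16, 196, 768])
print(num_frames)        # 8
print(patch_width)       # 14 patches along the width after the conv projection
```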
+class TimesformerEmbeddings(nn.Module):
+ """
+ Construct the patch and position embeddings.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ embed_dim = config.hidden_size
+ num_frames = config.num_frames
+ drop_rate = config.hidden_dropout_prob
+ attention_type = config.attention_type
+
+ self.attention_type = attention_type
+ self.patch_embeddings = TimesformerPatchEmbeddings(config)
+ self.num_patches = self.patch_embeddings.num_patches
+
+ # Positional Embeddings
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+ self.position_embeddings = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))
+ self.pos_drop = nn.Dropout(p=drop_rate)
+ if attention_type != "space_only":
+ self.time_embeddings = nn.Parameter(torch.zeros(1, num_frames, embed_dim))
+ self.time_drop = nn.Dropout(p=drop_rate)
+
+ def forward(self, pixel_values):
+ batch_size = pixel_values.shape[0]
+
+ # create patch embeddings
+ embeddings, num_frames, patch_width = self.patch_embeddings(pixel_values)
+
+ cls_tokens = self.cls_token.expand(embeddings.size(0), -1, -1)
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+ # resizing the positional embeddings in case they don't match the input at inference
+ if embeddings.size(1) != self.position_embeddings.size(1):
+ position_embeddings = self.position_embeddings
+ cls_pos_embed = position_embeddings[0, 0, :].unsqueeze(0).unsqueeze(1)
+ other_pos_embed = position_embeddings[0, 1:, :].unsqueeze(0).transpose(1, 2)
+ patch_num = int(other_pos_embed.size(2) ** 0.5)
+ patch_height = embeddings.size(1) // patch_width
+ other_pos_embed = other_pos_embed.reshape(1, embeddings.size(2), patch_num, patch_num)
+ new_pos_embed = nn.functional.interpolate(
+ other_pos_embed, size=(patch_height, patch_width), mode="nearest"
+ )
+ new_pos_embed = new_pos_embed.flatten(2)
+ new_pos_embed = new_pos_embed.transpose(1, 2)
+ new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
+ embeddings = embeddings + new_pos_embed
+ else:
+ embeddings = embeddings + self.position_embeddings
+ embeddings = self.pos_drop(embeddings)
+
+ # Time Embeddings
+ if self.attention_type != "space_only":
+ cls_tokens = embeddings[:batch_size, 0, :].unsqueeze(1)
+ embeddings = embeddings[:, 1:]
+ _, patch_height, patch_width = embeddings.shape
+ embeddings = (
+ embeddings.reshape(batch_size, num_frames, patch_height, patch_width)
+ .permute(0, 2, 1, 3)
+ .reshape(batch_size * patch_height, num_frames, patch_width)
+ )
+ # Resizing time embeddings in case they don't match
+ if num_frames != self.time_embeddings.size(1):
+ time_embeddings = self.time_embeddings.transpose(1, 2)
+ new_time_embeddings = nn.functional.interpolate(time_embeddings, size=(num_frames), mode="nearest")
+ new_time_embeddings = new_time_embeddings.transpose(1, 2)
+ embeddings = embeddings + new_time_embeddings
+ else:
+ embeddings = embeddings + self.time_embeddings
+ embeddings = self.time_drop(embeddings)
+ embeddings = embeddings.view(batch_size, patch_height, num_frames, patch_width).reshape(
+ batch_size, patch_height * num_frames, patch_width
+ )
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+ return embeddings
+
+
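A minimal end-to-end shape check of the embedding module, again assuming the default config; the 1 + 8 * 196 = 1569 tokens match the `[1, 1569, 768]` shape shown in the model docstring further below:

```python
import torch

from transformers import TimesformerConfig
from transformers.models.timesformer.modeling_timesformer import TimesformerEmbeddings

# Assumes the default config: 8 frames of 224x224 pixels, 16x16 patches -> 196 patches per frame.
config = TimesformerConfig()
embeddings_module = TimesformerEmbeddings(config)

pixel_values = torch.randn(1, config.num_frames, config.num_channels, config.image_size, config.image_size)
embeddings = embeddings_module(pixel_values)

# One [CLS] token plus 8 frames * 196 patches = 1569 tokens.
print(embeddings.shape)  # torch.Size([1, 1569, 768])
```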
+# Copied from transformers.models.beit.modeling_beit.drop_path
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
+ output = input.div(keep_prob) * random_tensor
+ return output
+
+
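A small sketch of the stochastic-depth behaviour: at evaluation time the input passes through unchanged, while in training mode roughly `drop_prob` of the samples are zeroed and the survivors are rescaled by `1 / keep_prob` so the expectation is preserved:

```python
import torch

from transformers.models.timesformer.modeling_timesformer import drop_path

torch.manual_seed(0)
x = torch.ones(8, 4)

# At evaluation time (training=False) the input passes through unchanged.
assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)

# In training mode roughly half of the samples are zeroed; the survivors are
# scaled by 1 / keep_prob = 2.0, so the expected value is unchanged.
out = drop_path(x, drop_prob=0.5, training=True)
print(out[:, 0])  # each per-sample entry is either 0.0 or 2.0
```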
+# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->TimeSformer
+class TimeSformerDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L57
+class TimesformerSelfAttention(nn.Module):
+ def __init__(self, config: TimesformerConfig):
+ super().__init__()
+
+ num_heads = config.num_attention_heads
+ qkv_bias = config.qkv_bias
+ attention_dropout_prob = config.attention_probs_dropout_prob
+
+ self.num_heads = num_heads
+ head_dim = config.hidden_size // num_heads
+ self.scale = head_dim**-0.5
+ self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=qkv_bias)
+ self.attn_drop = nn.Dropout(attention_dropout_prob)
+
+ def forward(self, hidden_states, output_attentions: bool = False):
+ batch_size, hidden_size, num_channels = hidden_states.shape
+ qkv = (
+ self.qkv(hidden_states)
+ .reshape(batch_size, hidden_size, 3, self.num_heads, num_channels // self.num_heads)
+ .permute(2, 0, 3, 1, 4)
+ )
+ query, key, value = qkv[0], qkv[1], qkv[2]
+
+ attention_probs = (query @ key.transpose(-2, -1)) * self.scale
+ attention_probs = attention_probs.softmax(dim=-1)
+ attention_probs = self.attn_drop(attention_probs)
+
+ context_layer = (attention_probs @ value).transpose(1, 2).reshape(batch_size, hidden_size, num_channels)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
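A shape sketch of the fused qkv attention under the default config (12 heads of size 64); note that inside `forward` the sequence length is bound to the variable named `hidden_size`:

```python
import torch

from transformers import TimesformerConfig
from transformers.models.timesformer.modeling_timesformer import TimesformerSelfAttention

config = TimesformerConfig()  # 12 heads of size 64, hidden size 768 by default
attention = TimesformerSelfAttention(config)

# (batch_size, sequence_length, hidden_size)
hidden_states = torch.randn(2, 197, config.hidden_size)
context, attention_probs = attention(hidden_states, output_attentions=True)

print(context.shape)          # torch.Size([2, 197, 768])
print(attention_probs.shape)  # torch.Size([2, 12, 197, 197])
```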
+class TimesformerSelfOutput(nn.Module):
+ """
+ The residual connection is defined in TimesformerLayer instead of here (as is the case with other models), due to
+ the layernorm applied before each block.
+ """
+
+ def __init__(self, config: TimesformerConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class TimeSformerAttention(nn.Module):
+ def __init__(self, config: TimesformerConfig) -> None:
+ super().__init__()
+ self.attention = TimesformerSelfAttention(config)
+ self.output = TimesformerSelfOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, output_attentions)
+
+ attention_output = self.output(self_outputs[0])
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L39
+class TimesformerIntermediate(nn.Module):
+ def __init__(self, config: TimesformerConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class TimesformerOutput(nn.Module):
+ def __init__(self, config: TimesformerConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L89
+class TimesformerLayer(nn.Module):
+ def __init__(self, config: TimesformerConfig, layer_index: int) -> None:
+ super().__init__()
+
+ attention_type = config.attention_type
+
+ drop_path_rates = [
+ x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)
+ ] # stochastic depth decay rule
+ drop_path_rate = drop_path_rates[layer_index]
+
+ self.drop_path = TimeSformerDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
+ self.attention = TimeSformerAttention(config)
+ self.intermediate = TimesformerIntermediate(config)
+ self.output = TimesformerOutput(config)
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.config = config
+ self.attention_type = attention_type
+ if attention_type not in ["divided_space_time", "space_only", "joint_space_time"]:
+ raise ValueError("Unknown attention type: {}".format(attention_type))
+
+ # Temporal Attention Parameters
+ if self.attention_type == "divided_space_time":
+ self.temporal_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.temporal_attention = TimeSformerAttention(config)
+ self.temporal_dense = nn.Linear(config.hidden_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False):
+ num_frames = self.config.num_frames
+ num_patch_width = self.config.image_size // self.config.patch_size
+ batch_size = hidden_states.shape[0]
+ num_spatial_tokens = (hidden_states.size(1) - 1) // num_frames
+ num_patch_height = num_spatial_tokens // num_patch_width
+
+ if self.attention_type in ["space_only", "joint_space_time"]:
+ self_attention_outputs = self.attention(
+ self.layernorm_before(hidden_states), output_attentions=output_attentions
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ hidden_states = hidden_states + self.drop_path(attention_output)
+
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+ layer_output = self.output(layer_output)
+ layer_output = hidden_states + self.drop_path(layer_output)
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+ elif self.attention_type == "divided_space_time":
+ # Temporal
+ temporal_embedding = hidden_states[:, 1:, :]
+ temporal_embedding = temporal_embedding.reshape(
+ batch_size, num_patch_height, num_patch_width, num_frames, temporal_embedding.shape[2]
+ ).reshape(batch_size * num_patch_height * num_patch_width, num_frames, temporal_embedding.shape[2])
+
+ temporal_attention_outputs = self.temporal_attention(
+ self.temporal_layernorm(temporal_embedding),
+ )
+ attention_output = temporal_attention_outputs[0]
+
+ residual_temporal = self.drop_path(attention_output)
+
+ residual_temporal = residual_temporal.reshape(
+ batch_size, num_patch_height, num_patch_width, num_frames, residual_temporal.shape[2]
+ ).reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_temporal.shape[2])
+ residual_temporal = self.temporal_dense(residual_temporal)
+ temporal_embedding = hidden_states[:, 1:, :] + residual_temporal
+
+ # Spatial
+ init_cls_token = hidden_states[:, 0, :].unsqueeze(1)
+ cls_token = init_cls_token.repeat(1, num_frames, 1)
+ cls_token = cls_token.reshape(batch_size * num_frames, 1, cls_token.shape[2])
+ spatial_embedding = temporal_embedding
+ spatial_embedding = (
+ spatial_embedding.reshape(
+ batch_size, num_patch_height, num_patch_width, num_frames, spatial_embedding.shape[2]
+ )
+ .permute(0, 3, 1, 2, 4)
+ .reshape(batch_size * num_frames, num_patch_height * num_patch_width, spatial_embedding.shape[2])
+ )
+ spatial_embedding = torch.cat((cls_token, spatial_embedding), 1)
+
+ spatial_attention_outputs = self.attention(
+ self.layernorm_before(spatial_embedding), output_attentions=output_attentions
+ )
+ attention_output = spatial_attention_outputs[0]
+ outputs = spatial_attention_outputs[1:] # add self attentions if we output attention weights
+
+ residual_spatial = self.drop_path(attention_output)
+
+ # Taking care of CLS token
+ cls_token = residual_spatial[:, 0, :]
+ cls_token = cls_token.reshape(batch_size, num_frames, cls_token.shape[1])
+ cls_token = torch.mean(cls_token, 1, True) # averaging for every frame
+ residual_spatial = residual_spatial[:, 1:, :]
+ residual_spatial = (
+ residual_spatial.reshape(
+ batch_size, num_frames, num_patch_height, num_patch_width, residual_spatial.shape[2]
+ )
+ .permute(0, 2, 3, 1, 4)
+ .reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_spatial.shape[2])
+ )
+ residual = residual_spatial
+ hidden_states = temporal_embedding
+
+ # Mlp
+ hidden_states = torch.cat((init_cls_token, hidden_states), 1) + torch.cat((cls_token, residual), 1)
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+ layer_output = self.output(layer_output)
+ layer_output = hidden_states + self.drop_path(layer_output)
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
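A minimal check, assuming the default divided space-time attention config, that a single layer preserves the `(batch, 1 + num_frames * num_patches, hidden_size)` token layout:

```python
import torch

from transformers import TimesformerConfig
from transformers.models.timesformer.modeling_timesformer import TimesformerLayer

config = TimesformerConfig()  # attention_type="divided_space_time" by default
layer = TimesformerLayer(config, layer_index=0)

# 1 [CLS] token + 8 frames * 196 patches, each of width 768.
num_patches = (config.image_size // config.patch_size) ** 2  # 14 * 14 = 196
hidden_states = torch.randn(1, 1 + config.num_frames * num_patches, config.hidden_size)

layer_output = layer(hidden_states)[0]
print(layer_output.shape)  # torch.Size([1, 1569, 768])
```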
+class TimesformerEncoder(nn.Module):
+ def __init__(self, config: TimesformerConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([TimesformerLayer(config, ind) for ind in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class TimesformerPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TimesformerConfig
+ base_model_prefix = "timesformer"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ nn.init.trunc_normal_(module.weight, std=self.config.initializer_range)
+ if module.bias is not None:
+ nn.init.constant_(module.bias, 0)
+ elif isinstance(module, nn.LayerNorm):
+ nn.init.constant_(module.bias, 0)
+ nn.init.constant_(module.weight, 1.0)
+ elif isinstance(module, TimesformerEmbeddings):
+ nn.init.trunc_normal_(module.cls_token, std=self.config.initializer_range)
+ nn.init.trunc_normal_(module.position_embeddings, std=self.config.initializer_range)
+ module.patch_embeddings.apply(self._init_weights)
+
+
+TIMESFORMER_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`TimesformerConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TIMESFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`VideoMAEImageProcessor.preprocess`] for details.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare TimeSformer Model transformer outputting raw hidden-states without any specific head on top.",
+ TIMESFORMER_START_DOCSTRING,
+)
+class TimesformerModel(TimesformerPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = TimesformerEmbeddings(config)
+ self.encoder = TimesformerEncoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(TIMESFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.FloatTensor,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import av
+ >>> import numpy as np
+
+ >>> from transformers import AutoImageProcessor, TimesformerModel
+ >>> from huggingface_hub import hf_hub_download
+
+ >>> np.random.seed(0)
+
+
+ >>> def read_video_pyav(container, indices):
+ ... '''
+ ... Decode the video with PyAV decoder.
+ ... Args:
+ ... container (`av.container.input.InputContainer`): PyAV container.
+ ... indices (`List[int]`): List of frame indices to decode.
+ ... Returns:
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
+ ... '''
+ ... frames = []
+ ... container.seek(0)
+ ... start_index = indices[0]
+ ... end_index = indices[-1]
+ ... for i, frame in enumerate(container.decode(video=0)):
+ ... if i > end_index:
+ ... break
+ ... if i >= start_index and i in indices:
+ ... frames.append(frame)
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+ ... '''
+ ... Sample a given number of frame indices from the video.
+ ... Args:
+ ... clip_len (`int`): Total number of frames to sample.
+ ... frame_sample_rate (`int`): Sample every n-th frame.
+ ... seg_len (`int`): Maximum allowed index of sample's last frame.
+ ... Returns:
+ ... indices (`List[int]`): List of sampled frame indices
+ ... '''
+ ... converted_len = int(clip_len * frame_sample_rate)
+ ... end_idx = np.random.randint(converted_len, seg_len)
+ ... start_idx = end_idx - converted_len
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+ ... return indices
+
+
+ >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
+ >>> file_path = hf_hub_download(
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
+ ... )
+ >>> container = av.open(file_path)
+
+ >>> # sample 8 frames
+ >>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
+ >>> video = read_video_pyav(container, indices)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
+ >>> model = TimesformerModel.from_pretrained("facebook/timesformer-base-finetuned-k400")
+
+ >>> # prepare video for the model
+ >>> inputs = image_processor(list(video), return_tensors="pt")
+
+ >>> # forward pass
+ >>> outputs = model(**inputs)
+ >>> last_hidden_states = outputs.last_hidden_state
+ >>> list(last_hidden_states.shape)
+ [1, 1569, 768]
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ embedding_output = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ if self.layernorm is not None:
+ sequence_output = self.layernorm(sequence_output)
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """TimeSformer Model transformer with a video classification head on top (a linear layer on top of the final hidden state
+of the [CLS] token) e.g. for Kinetics-400.""",
+ TIMESFORMER_START_DOCSTRING,
+)
+class TimesformerForVideoClassification(TimesformerPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.timesformer = TimesformerModel(config)
+
+ # Classifier head
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(TIMESFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import av
+ >>> import torch
+ >>> import numpy as np
+
+ >>> from transformers import AutoImageProcessor, TimesformerForVideoClassification
+ >>> from huggingface_hub import hf_hub_download
+
+ >>> np.random.seed(0)
+
+
+ >>> def read_video_pyav(container, indices):
+ ... '''
+ ... Decode the video with PyAV decoder.
+ ... Args:
+ ... container (`av.container.input.InputContainer`): PyAV container.
+ ... indices (`List[int]`): List of frame indices to decode.
+ ... Returns:
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
+ ... '''
+ ... frames = []
+ ... container.seek(0)
+ ... start_index = indices[0]
+ ... end_index = indices[-1]
+ ... for i, frame in enumerate(container.decode(video=0)):
+ ... if i > end_index:
+ ... break
+ ... if i >= start_index and i in indices:
+ ... frames.append(frame)
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+ ... '''
+ ... Sample a given number of frame indices from the video.
+ ... Args:
+ ... clip_len (`int`): Total number of frames to sample.
+ ... frame_sample_rate (`int`): Sample every n-th frame.
+ ... seg_len (`int`): Maximum allowed index of sample's last frame.
+ ... Returns:
+ ... indices (`List[int]`): List of sampled frame indices
+ ... '''
+ ... converted_len = int(clip_len * frame_sample_rate)
+ ... end_idx = np.random.randint(converted_len, seg_len)
+ ... start_idx = end_idx - converted_len
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+ ... return indices
+
+
+ >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
+ >>> file_path = hf_hub_download(
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
+ ... )
+ >>> container = av.open(file_path)
+
+ >>> # sample 8 frames
+ >>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
+ >>> video = read_video_pyav(container, indices)
+
+ >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
+ >>> model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
+
+ >>> inputs = image_processor(list(video), return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**inputs)
+ ... logits = outputs.logits
+
+ >>> # model predicts one of the 400 Kinetics-400 classes
+ >>> predicted_label = logits.argmax(-1).item()
+ >>> print(model.config.id2label[predicted_label])
+ eating spaghetti
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.timesformer(
+ pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0][:, 0]
+
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..63c0bd2717447137661a4c01596ba0f581f3504d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__init__.py
@@ -0,0 +1,80 @@
+# coding=utf-8
+# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+ "configuration_tvp": [
+ "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "TvpConfig",
+ ],
+ "processing_tvp": ["TvpProcessor"],
+}
+
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_tvp"] = ["TvpImageProcessor"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tvp"] = [
+ "TVP_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TvpModel",
+ "TvpPreTrainedModel",
+ "TvpForVideoGrounding",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_tvp import (
+ TVP_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ TvpConfig,
+ )
+ from .processing_tvp import TvpProcessor
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_tvp import TvpImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tvp import (
+ TVP_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TvpForVideoGrounding,
+ TvpModel,
+ TvpPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
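Because of the `_LazyModule` indirection, user code imports these symbols from the top-level `transformers` package as usual; the torch- and vision-dependent submodules are only loaded on first access. A small sketch, assuming torch and Pillow are installed:

```python
# The lazy module makes these look like ordinary imports; the torch- and
# vision-dependent submodules are only loaded on first access.
from transformers import TvpConfig, TvpProcessor  # always importable
from transformers import TvpModel                 # needs torch
from transformers import TvpImageProcessor        # needs Pillow

config = TvpConfig()
print(config.model_type)  # "tvp"
```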
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01feec271ad5c86ed87d5cef69c425fc73c7f170
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/configuration_tvp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/configuration_tvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7640f8fb9b3d42e3d56a7914c00dfccab360916f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/configuration_tvp.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/image_processing_tvp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/image_processing_tvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7e5c6546d8f89be9d1848006c1b702540a08c15
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/image_processing_tvp.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/modeling_tvp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/modeling_tvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c154de08647dcc7357f413523e98e0aa7cdff9ac
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/modeling_tvp.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/processing_tvp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/processing_tvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ac55db058676af8a0b7dbdc8f96efe4ac16aaeb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/processing_tvp.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/configuration_tvp.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/configuration_tvp.py
new file mode 100644
index 0000000000000000000000000000000000000000..85b7ac6a41cbccccbfb1fb62becbcac2f4f2b63e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/configuration_tvp.py
@@ -0,0 +1,201 @@
+# coding=utf-8
+# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TVP model configuration"""
+
+import copy
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import TVP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TvpConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`TvpModel`]. It is used to instantiate a Tvp
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the Tvp
+ [Intel/tvp-base](https://huggingface.co/Intel/tvp-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ backbone_config (`PretrainedConfig` or `dict`, *optional*):
+ The configuration of the backbone model.
+ backbone (`str`, *optional*):
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
+ use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
+ Whether to use pretrained weights for the backbone.
+ use_timm_backbone (`bool`, *optional*, defaults to `False`):
+ Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
+ library.
+ backbone_kwargs (`dict`, *optional*):
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
+ distance_loss_weight (`float`, *optional*, defaults to 1.0):
+ The weight of distance loss.
+ duration_loss_weight (`float`, *optional*, defaults to 0.1):
+ The weight of duration loss.
+ visual_prompter_type (`str`, *optional*, defaults to `"framepad"`):
+            Visual prompt type. The type of padding. "framepad" means padding on each frame. Should be one of
+            "framepad" or "framedownpad".
+ visual_prompter_apply (`str`, *optional*, defaults to `"replace"`):
+ The way of applying visual prompt. Replace means use the value of prompt to change the original value in
+            visual inputs. Should be one of "replace", "add", or "remove".
+ visual_prompt_size (`int`, *optional*, defaults to 96):
+ The size of visual prompt.
+ max_img_size (`int`, *optional*, defaults to 448):
+ The maximum size of frame.
+ num_frames (`int`, *optional*, defaults to 48):
+ The number of frames extracted from a video.
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the Tvp text model. Defines the number of different tokens that can be represented by
+            the `input_ids` passed when calling [`TvpModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ max_grid_col_position_embeddings (`int`, *optional*, defaults to 100):
+ The largest number of horizontal patches from a video frame.
+ max_grid_row_position_embeddings (`int`, *optional*, defaults to 100):
+ The largest number of vertical patches from a video frame.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability of hidden layers.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability of attention layers.
+ """
+
+ model_type = "tvp"
+
+ def __init__(
+ self,
+ backbone_config=None,
+ backbone=None,
+ use_pretrained_backbone=False,
+ use_timm_backbone=False,
+ backbone_kwargs=None,
+ distance_loss_weight=1.0,
+ duration_loss_weight=0.1,
+ visual_prompter_type="framepad",
+ visual_prompter_apply="replace",
+ visual_prompt_size=96,
+ max_img_size=448,
+ num_frames=48,
+ vocab_size=30522,
+ hidden_size=768,
+ intermediate_size=3072,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ max_position_embeddings=512,
+ max_grid_col_position_embeddings=100,
+ max_grid_row_position_embeddings=100,
+ hidden_dropout_prob=0.1,
+ hidden_act="gelu",
+ layer_norm_eps=1e-12,
+ initializer_range=0.02,
+ attention_probs_dropout_prob=0.1,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ if use_pretrained_backbone:
+ raise ValueError("Pretrained backbones are not supported yet.")
+
+ if backbone_config is not None and backbone is not None:
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
+
+ if backbone_config is None and backbone is None:
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
+ backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
+ elif isinstance(backbone_config, dict):
+ backbone_model_type = backbone_config.get("model_type")
+ config_class = CONFIG_MAPPING[backbone_model_type]
+ backbone_config = config_class.from_dict(backbone_config)
+
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
+
+ self.backbone_config = backbone_config
+ self.backbone = backbone
+ self.use_pretrained_backbone = use_pretrained_backbone
+ self.use_timm_backbone = use_timm_backbone
+ self.backbone_kwargs = backbone_kwargs
+ self.distance_loss_weight = distance_loss_weight
+ self.duration_loss_weight = duration_loss_weight
+ self.visual_prompter_type = visual_prompter_type
+ self.visual_prompter_apply = visual_prompter_apply
+ self.visual_prompt_size = visual_prompt_size
+ self.max_img_size = max_img_size
+ self.num_frames = num_frames
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.max_position_embeddings = max_position_embeddings
+ self.max_grid_col_position_embeddings = max_grid_col_position_embeddings
+ self.max_grid_row_position_embeddings = max_grid_row_position_embeddings
+ self.layer_norm_eps = layer_norm_eps
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+
+ @classmethod
+ def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
+ """Instantiate a [`TvpConfig`] (or a derived class) from a pre-trained backbone model configuration.
+
+ Args:
+ backbone_config ([`PretrainedConfig`]):
+ The backbone configuration.
+ Returns:
+ [`TvpConfig`]: An instance of a configuration object
+ """
+ return cls(backbone_config=backbone_config, **kwargs)
+
+ def to_dict(self):
+ """
+        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].
+
+ Returns:
+ `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
+ """
+ output = copy.deepcopy(self.__dict__)
+ if output["backbone_config"] is not None:
+ output["backbone_config"] = self.backbone_config.to_dict()
+ output["model_type"] = self.__class__.model_type
+ return output
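A short usage sketch of the configuration class: with no backbone specified a default ResNet backbone config is created, `from_backbone_config` wraps an explicit backbone configuration, and `to_dict` serializes the nested backbone config into a plain dictionary:

```python
from transformers import ResNetConfig, TvpConfig

# With no backbone given, a default ResNet backbone config is created
# (out_features=["stage4"]), as logged in __init__ above.
config = TvpConfig()
print(config.backbone_config.model_type)  # "resnet"

# Alternatively, build the config around an explicit backbone configuration.
config = TvpConfig.from_backbone_config(ResNetConfig(out_features=["stage4"]))

# to_dict() serializes the nested backbone config into a plain dictionary.
as_dict = config.to_dict()
print(type(as_dict["backbone_config"]))  # <class 'dict'>
print(as_dict["model_type"])             # "tvp"
```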
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/image_processing_tvp.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/image_processing_tvp.py
new file mode 100644
index 0000000000000000000000000000000000000000..18600ee5fbe7f3c029c60c4b15df1defe43cf6aa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/image_processing_tvp.py
@@ -0,0 +1,502 @@
+# coding=utf-8
+# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for TVP."""
+
+from typing import Dict, Iterable, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ PaddingMode,
+ flip_channel_order,
+ pad,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ get_image_size,
+ is_valid_image,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+# Copied from transformers.models.vivit.image_processing_vivit.make_batched
+def make_batched(videos) -> List[List[ImageInput]]:
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
+ return videos
+
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
+ return [videos]
+
+ elif is_valid_image(videos):
+ return [[videos]]
+
+ raise ValueError(f"Could not make batched video from {videos}")
+
+
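`make_batched` normalizes the accepted inputs to a list of videos, each being a list of frames. A minimal sketch with NumPy frames (assuming Pillow is installed so the module imports cleanly):

```python
import numpy as np

from transformers.models.tvp.image_processing_tvp import make_batched

frame = np.zeros((448, 448, 3), dtype=np.uint8)
video = [frame] * 8

print(len(make_batched(frame)), len(make_batched(frame)[0]))  # 1 1 -> one video with one frame
print(len(make_batched(video)), len(make_batched(video)[0]))  # 1 8 -> one video with eight frames
print(len(make_batched([video, video])))                      # 2   -> already batched, returned as-is
```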
+def get_resize_output_image_size(
+ input_image: np.ndarray,
+ max_size: int = 448,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+) -> Tuple[int, int]:
+ height, width = get_image_size(input_image, input_data_format)
+ if height >= width:
+ ratio = width * 1.0 / height
+ new_height = max_size
+ new_width = new_height * ratio
+ else:
+ ratio = height * 1.0 / width
+ new_width = max_size
+ new_height = new_width * ratio
+ size = (int(new_height), int(new_width))
+
+ return size
+
+
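A worked example of the aspect-ratio-preserving size computation: for a 720x1280 frame with `max_size=448`, the longer edge (the width) is scaled to 448 and the height follows, int(448 * 720 / 1280) = 252 (assuming Pillow is installed so the module imports cleanly):

```python
import numpy as np

from transformers.models.tvp.image_processing_tvp import get_resize_output_image_size

# A 720x1280 (height x width) frame in channels-last layout.
frame = np.zeros((720, 1280, 3), dtype=np.uint8)

# The longer edge (the width) is scaled down to 448; the height follows the
# aspect ratio: int(448 * 720 / 1280) = 252.
print(get_resize_output_image_size(frame, max_size=448))  # (252, 448)
```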
+class TvpImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a Tvp image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+        size (`Dict[str, int]`, *optional*, defaults to `{"longest_edge": 448}`):
+ Size of the output image after resizing. The longest edge of the image will be resized to
+ `size["longest_edge"]` while maintaining the aspect ratio of the original image. Can be overriden by
+ `size` in the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
+ parameter in the `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
+ Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
+ in the `preprocess` method.
+ do_pad (`bool`, *optional*, defaults to `True`):
+ Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method.
+ pad_size (`Dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
+ Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the
+ `preprocess` method.
+ constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0):
+ The fill value to use when padding the image.
+ pad_mode (`PaddingMode`, *optional*, defaults to `PaddingMode.CONSTANT`):
+            The padding mode to use when padding the image.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ do_flip_channel_order (`bool`, *optional*, defaults to `True`):
+ Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
+ parameter in the `preprocess` method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_pad: bool = True,
+ pad_size: Dict[str, int] = None,
+ constant_values: Union[float, Iterable[float]] = 0,
+ pad_mode: PaddingMode = PaddingMode.CONSTANT,
+ do_normalize: bool = True,
+ do_flip_channel_order: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"longest_edge": 448}
+ crop_size = crop_size if crop_size is not None else {"height": 448, "width": 448}
+ pad_size = pad_size if pad_size is not None else {"height": 448, "width": 448}
+
+ self.do_resize = do_resize
+ self.size = size
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_pad = do_pad
+ self.pad_size = pad_size
+ self.constant_values = constant_values
+ self.pad_mode = pad_mode
+ self.do_normalize = do_normalize
+ self.do_flip_channel_order = do_flip_channel_order
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self._valid_processor_keys = [
+ "videos",
+ "do_resize",
+ "size",
+ "resample",
+ "do_center_crop",
+ "crop_size",
+ "do_rescale",
+ "rescale_factor",
+ "do_pad",
+ "pad_size",
+ "constant_values",
+ "pad_mode",
+ "do_normalize",
+ "do_flip_channel_order",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
+ have the size `(h, w)`. If `size` is of the form `{"longest_edge": s}`, the output image will have its
+ longest edge of length `s` while keeping the aspect ratio of the original image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+                Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ size = get_size_dict(size, default_to_square=False)
+ if "height" in size and "width" in size:
+ output_size = (size["height"], size["width"])
+ elif "longest_edge" in size:
+ output_size = get_resize_output_image_size(image, size["longest_edge"], input_data_format)
+ else:
+ raise ValueError(f"Size must have 'height' and 'width' or 'longest_edge' as keys. Got {size.keys()}")
+
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ def pad_image(
+ self,
+ image: np.ndarray,
+ pad_size: Dict[str, int] = None,
+ constant_values: Union[float, Iterable[float]] = 0,
+ pad_mode: PaddingMode = PaddingMode.CONSTANT,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ):
+ """
+ Pad an image with zeros to the given size.
+
+ Args:
+ image (`np.ndarray`):
+ Image to pad.
+            pad_size (`Dict[str, int]`):
+                Size of the output image after padding.
+            constant_values (`Union[float, Iterable[float]]`):
+                The fill value to use when padding the image.
+            pad_mode (`PaddingMode`):
+                The padding mode, defaults to `PaddingMode.CONSTANT`.
+            data_format (`ChannelDimension` or `str`, *optional*):
+                The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ height, width = get_image_size(image, channel_dim=input_data_format)
+ max_height = pad_size.get("height", height)
+ max_width = pad_size.get("width", width)
+
+ pad_right, pad_bottom = max_width - width, max_height - height
+ if pad_right < 0 or pad_bottom < 0:
+ raise ValueError("The padding size must be greater than image size")
+
+ padding = ((0, pad_bottom), (0, pad_right))
+ padded_image = pad(
+ image,
+ padding,
+ mode=pad_mode,
+ constant_values=constant_values,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+
+ return padded_image
+
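A quick sketch of `pad_image` (assuming Pillow is installed): a 252x448 channels-last frame is padded on the bottom with the constant fill value up to the default 448x448 target:

```python
import numpy as np

from transformers import TvpImageProcessor

processor = TvpImageProcessor()

# A 252x448 channels-last frame is padded on the bottom up to the 448x448 target.
image = np.ones((252, 448, 3), dtype=np.float32)
padded = processor.pad_image(image, pad_size={"height": 448, "width": 448})

print(padded.shape)    # (448, 448, 3)
print(padded[300, 0])  # [0. 0. 0.] -> the padded region holds the constant fill value
```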
+ def _preprocess_image(
+ self,
+ image: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_pad: bool = True,
+ pad_size: Dict[str, int] = None,
+ constant_values: Union[float, Iterable[float]] = None,
+ pad_mode: PaddingMode = None,
+ do_normalize: bool = None,
+ do_flip_channel_order: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """Preprocesses a single image."""
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_pad=do_pad,
+ size_divisibility=pad_size, # here the pad() method simply requires the pad_size argument.
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+ if do_center_crop:
+ image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(
+ image=image.astype(np.float32), mean=image_mean, std=image_std, input_data_format=input_data_format
+ )
+
+ if do_pad:
+ image = self.pad_image(
+ image=image,
+ pad_size=pad_size,
+ constant_values=constant_values,
+ pad_mode=pad_mode,
+ input_data_format=input_data_format,
+ )
+
+ # the pretrained checkpoints assume images are BGR, not RGB
+ if do_flip_channel_order:
+ image = flip_channel_order(image=image, input_data_format=input_data_format)
+
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+
+ return image
+
+ def preprocess(
+ self,
+ videos: Union[ImageInput, List[ImageInput], List[List[ImageInput]]],
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ do_pad: bool = None,
+ pad_size: Dict[str, int] = None,
+ constant_values: Union[float, Iterable[float]] = None,
+ pad_mode: PaddingMode = None,
+ do_normalize: bool = None,
+ do_flip_channel_order: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Preprocess a video or batch of videos.
+
+ Args:
+ videos (`ImageInput` or `List[ImageInput]` or `List[List[ImageInput]]`):
+ Frames to preprocess.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after applying resize.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+ has an effect if `do_resize` is set to `True`.
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+ Whether to center crop the image.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+ Size of the image after applying the center crop.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the image values to the range [0, 1].
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ do_pad (`bool`, *optional*, defaults to `self.do_pad`):
+ Whether to pad the image.
+ pad_size (`Dict[str, int]`, *optional*, defaults to `self.pad_size`):
+ Size of the image after applying the padding.
+ constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to `self.constant_values`):
+ The fill value to use when padding the image.
+ pad_mode (`PaddingMode`, *optional*, defaults to `self.pad_mode`):
+ The padding mode to use when padding the image.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
+ Whether to flip the channel order of the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the inferred channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_pad = do_pad if do_pad is not None else self.do_pad
+ pad_size = pad_size if pad_size is not None else self.pad_size
+ constant_values = constant_values if constant_values is not None else self.constant_values
+ pad_mode = pad_mode if pad_mode else self.pad_mode
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ do_flip_channel_order = (
+ do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
+ )
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(videos):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ videos = make_batched(videos)
+
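+ # Preprocess every frame of every video and stack the frames of each video into a single numpy array.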
+ videos = [
+ np.array(
+ [
+ self._preprocess_image(
+ image=img,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_pad=do_pad,
+ pad_size=pad_size,
+ constant_values=constant_values,
+ pad_mode=pad_mode,
+ do_normalize=do_normalize,
+ do_flip_channel_order=do_flip_channel_order,
+ image_mean=image_mean,
+ image_std=image_std,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for img in video
+ ]
+ )
+ for video in videos
+ ]
+
+ data = {"pixel_values": videos}
+ return BatchFeature(data=data, tensor_type=return_tensors)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/modeling_tvp.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/modeling_tvp.py
new file mode 100644
index 0000000000000000000000000000000000000000..da8e85da74cfbdca71897a204788c98431a00135
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/modeling_tvp.py
@@ -0,0 +1,892 @@
+# coding=utf-8
+# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch TVP Model"""
+
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import prune_linear_layer
+from ...utils import logging
+from ...utils.backbone_utils import load_backbone
+from .configuration_tvp import TvpConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import TVP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class TvpVideoGroundingOutput(ModelOutput):
+ """
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Temporal-Distance IoU loss for video grounding.
+ logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
+ Contains start_time/duration and end_time/duration. It is the time slot of the videos corresponding to the
+ input texts.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
+ the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+class TvpLoss(nn.Module):
+ """
+ This class computes the losses for `TvpForVideoGrounding`. It compares the predicted time span with the
+ ground-truth span and combines an IoU loss with a distance loss and a duration loss.
+
+ Args:
+ losses (`List[str]`):
+ List of all the losses to be applied.
+ """
+
+ def __init__(self, losses):
+ super().__init__()
+ self.loss_map = {
+ "iou": self.loss_iou,
+ "distance": self.loss_distance,
+ "duration": self.loss_duration,
+ }
+ for loss in losses:
+ if loss not in self.loss_map:
+ raise ValueError(f"Loss {loss} not supported")
+
+ self.losses = losses
+
+ def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
+ """
+ Measure the IoU loss, i.e. 1 - IoU between the candidate and ground-truth time spans.
+ """
+ inter = torch.min(candidates_end_time, end_time) - torch.max(candidates_start_time, start_time)
+ union = torch.max(candidates_end_time, end_time) - torch.min(candidates_start_time, start_time)
+ iou = 1 - inter.clamp(min=0) / union
+
+ return iou
+
+ def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
+ """
+ Measure the distance of mid points.
+ """
+ mid_candidates = torch.div(torch.add(candidates_start_time, candidates_end_time), 2.0)
+ mid_groundtruth = torch.div(torch.add(start_time, end_time), 2.0)
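+ # Normalized distance between the midpoints of the candidate and ground-truth spans,
+ # floored at 0.2 so candidates that are already close stop contributing gradient.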
+ distance_diff = torch.div(
+ torch.max(mid_candidates, mid_groundtruth) - torch.min(mid_candidates, mid_groundtruth), duration
+ ).clamp(min=0.2)
+
+ return distance_diff
+
+ def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):
+ """
+ Measure the difference of duration.
+ """
+ duration_candidates = torch.sub(candidates_end_time, candidates_start_time)
+ duration_groundtruth = torch.sub(end_time, start_time)
+ duration_diff = torch.square(torch.div(torch.sub(duration_candidates, duration_groundtruth), duration))
+ duration_diff = duration_diff.clamp(min=0.4)
+
+ return duration_diff
+
+ def forward(self, logits, labels):
+ """
+ This performs the loss computation.
+
+ Args:
+ logits (`torch.FloatTensor`):
+ The output logits of head module.
+ labels (`List[torch.FloatTensor]`):
+ List of tensors ([duration, start, end]), which contains the video duration and the start and end times of the video corresponding to the text.
+ """
+ duration, start_time, end_time = labels
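+ # The logits are normalized by the video duration (start/duration, end/duration), so multiplying by
+ # the duration recovers absolute start and end times.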
+ candidates = torch.mul(logits, duration)
+ candidates_start_time, candidates_end_time = candidates[:, 0].float(), candidates[:, 1].float()
+
+ losses_dict = {}
+ for loss in self.losses:
+ losses_dict.update(
+ {loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)}
+ )
+
+ return losses_dict
+
+
+class TvpVisionModel(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.backbone = load_backbone(config)
+ self.grid_encoder_conv = nn.Conv2d(
+ config.backbone_config.hidden_sizes[-1],
+ config.hidden_size,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ groups=1,
+ bias=False,
+ )
+
+ def forward(self, pixel_values):
+ batch_size, num_frames, num_channels, height, width = pixel_values.shape
+ # (batch_size * num_frames, num_channels, height, width)
+ pixel_values = pixel_values.view(batch_size * num_frames, num_channels, height, width)
+ grid_feat_outputs = self.backbone(pixel_values)["feature_maps"][0]
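+ # Project the backbone feature channels to the model hidden size, then downsample the spatial grid by 2.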
+ grid = self.grid_encoder_conv(grid_feat_outputs)
+ grid = nn.functional.max_pool2d(grid, kernel_size=2, stride=2)
+ grid = nn.functional.relu(grid, inplace=True)
+ new_channel, new_height, new_width = grid.shape[-3:]
+ # (batch_size, num_frames, num_channels, height, width)
+ grid = grid.view(batch_size, num_frames, new_channel, new_height, new_width)
+ # (batch_size, num_frames, height, width, num_channels)
+ grid = grid.permute(0, 1, 3, 4, 2)
+ return grid
+
+
+class TvpVisualInputEmbedding(nn.Module):
+ """
+ Takes input of both image and video (multi-frame)
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ # sequence embedding
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.row_position_embeddings = nn.Embedding(config.max_grid_row_position_embeddings, config.hidden_size)
+ self.col_position_embeddings = nn.Embedding(config.max_grid_col_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(1, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def add_2d_positional_embeddings(self, grid):
+ """
+ Args:
+ grid: (batch_size, height, width, hidden_dim)
+ Returns:
+ grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim)
+ """
+ batch_size, height, width, hidden_dim = grid.shape
+
+ # add row-wise position embeddings
+ row_position_ids = torch.arange(height, dtype=torch.long, device=grid.device) # (height, )
+ row_position_embeddings = self.row_position_embeddings(row_position_ids) # (height, hidden_dim)
+ row_shape = (1,) * (len(grid.shape) - 3) + (height, 1, hidden_dim) # (1, height, 1, hidden_dim)
+ grid = grid + row_position_embeddings.view(*row_shape) # broadcast automatically
+
+ # add column-wise position embeddings
+ col_position_ids = torch.arange(width, dtype=torch.long, device=grid.device) # (width, )
+ col_position_embeddings = self.col_position_embeddings(col_position_ids) # (width, hidden_dim)
+ col_shape = (1,) * (len(grid.shape) - 3) + (1, width, hidden_dim) # (1, 1, width, hidden_dim)
+ return grid + col_position_embeddings.view(*col_shape) # broadcast automatically
+
+ def forward(self, grid):
+ """
+ Args:
+ grid: Array of shape (batch_size, num_frames, height, width, num_channels).
+ It contains processed frames extracted from videos and is generated by the Tvp image processor. Note
+ that num_frames can be 1.
+
+ Returns:
+ embeddings: The embedding of grid with size (batch_size, height*width, num_channels)
+
+ """
+ batch_size, num_frames, height, width, num_channels = grid.shape
+ # temporal mean pooling, (batch_size, height, width, hidden_size)
+ grid = grid.mean(1)
+ grid = self.add_2d_positional_embeddings(grid)
+ # image token sequence, (batch_size, height*width, num_channels)
+ visual_tokens = grid.view(batch_size, -1, num_channels)
+ visual_tokens_shape = visual_tokens.shape[:-1]
+ device = visual_tokens.device
+
+ # image token type embeddings.
+ token_type_ids = torch.zeros(visual_tokens_shape, dtype=torch.long, device=device)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = visual_tokens + token_type_embeddings
+ embeddings = self.layer_norm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class TvpTextInputEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ if position_ids is None:
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
+ position_ids = position_ids.unsqueeze(0).expand(input_shape)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ position_embeddings = self.position_embeddings(position_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
+ embeddings = self.layer_norm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class TvpAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+ self.attn_dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ mask = torch.ones(self.num_attention_heads, self.attention_head_size)
+ heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads
+ for head in heads:
+ # Compute how many pruned heads are before the head and move the index accordingly
+ head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
+ mask[head] = 0
+ mask = mask.view(-1).contiguous().eq(1)
+ index = torch.arange(len(mask))[mask].long()
+
+ # Prune linear layers
+ self.query = prune_linear_layer(self.query, index)
+ self.key = prune_linear_layer(self.key, index)
+ self.value = prune_linear_layer(self.value, index)
+ self.dense = prune_linear_layer(self.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.num_attention_heads = self.num_attention_heads - len(heads)
+ self.all_head_size = self.attention_head_size * self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def _reshape(self, tensor: torch.Tensor, sequence_length: int, batch_size: int):
+ return (
+ tensor.view(batch_size, sequence_length, self.num_attention_heads, self.attention_head_size)
+ .transpose(1, 2)
+ .contiguous()
+ )
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions: Optional[bool] = None,
+ ):
+ batch_size, sequence_length = hidden_states.shape[:2]
+ mixed_query_layer = self.query(hidden_states)
+
+ mixed_key_layer = self.key(hidden_states)
+ mixed_value_layer = self.value(hidden_states)
+
+ query_layer = self._reshape(mixed_query_layer, sequence_length, batch_size)
+ key_layer = self._reshape(mixed_key_layer, sequence_length, batch_size)
+ value_layer = self._reshape(mixed_value_layer, sequence_length, batch_size)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.attn_dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ attn_output = torch.matmul(attention_probs, value_layer)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(batch_size, sequence_length, self.all_head_size)
+
+ attn_output = self.dense(attn_output)
+ attn_output = self.dropout(attn_output)
+ attn_output = self.layer_norm(attn_output + hidden_states)
+ # add attentions if we output them
+ outputs = (attn_output, attention_probs) if output_attentions else (attn_output,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Tvp
+class TvpIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class TvpOutputLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.layer_norm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class TvpEncodeLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = TvpAttention(config)
+ self.intermediate = TvpIntermediate(config)
+ self.output = TvpOutputLayer(config)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions: Optional[bool] = None,
+ ):
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ outputs = (layer_output,) + outputs
+ return outputs
+
+
+class TvpEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([TvpEncodeLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ all_hidden_states = ()
+ all_attentions = ()
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[i] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, output_attentions
+ )
+
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ outputs = (hidden_states,)
+ if output_hidden_states:
+ outputs = outputs + (all_hidden_states,)
+ if output_attentions:
+ outputs = outputs + (all_attentions,)
+ return outputs # last-layer hidden state, (all hidden states), (all attentions)
+
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states if output_hidden_states else None,
+ attentions=all_attentions if output_attentions else None,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Tvp
+class TvpPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class TvpPreTrainedModel(PreTrainedModel):
+ """An abstract class to handle weights initialization and
+ a simple interface for downloading and loading pretrained models.
+ """
+
+ config_class = TvpConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Embedding)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ if isinstance(module, nn.Linear) and module.bias is not None:
+ module.bias.data.zero_()
+
+ if isinstance(module, nn.Conv2d):
+ nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
+ if module.bias is not None:
+ nn.init.constant_(module.bias, 0)
+
+
+TVP_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`TvpConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TVP_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
+ IDs?](../glossary#input-ids)
+
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`TvpImageProcessor`]. See [`TvpImageProcessor.__call__`]
+ for details.
+
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ [What are attention masks?](../glossary#attention-mask)
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class TvpFrameDownPadPrompter(nn.Module):
+ """
+ Pad frames extracted from videos only at the bottom.
+ """
+
+ def __init__(self, config):
+ if config.visual_prompter_apply not in ("add", "replace", "remove"):
+ raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)")
+
+ super().__init__()
+ self.visual_prompt_size = config.visual_prompt_size
+ self.frame_num = config.frame_num
+ self.max_img_size = config.max_img_size
+ self.visual_prompter_apply = config.visual_prompter_apply
+
+ self.pad_down = nn.Parameter(
+ torch.randn([1, config.frame_num, 3, config.visual_prompt_size, config.max_img_size])
+ )
+
+ def forward(self, pixel_values):
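+ # For "replace" and "remove", zero out the bottom `visual_prompt_size` rows of the frames;
+ # for "add" and "replace", add the learnable prompt over those rows.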
+ if self.visual_prompter_apply != "add":
+ visual_prompt_mask = torch.ones(
+ [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device
+ )
+ visual_prompt_mask[self.max_img_size - self.visual_prompt_size : self.max_img_size, :] = 0.0
+ pixel_values *= visual_prompt_mask
+ if self.visual_prompter_apply != "remove":
+ prompt = torch.zeros(
+ [pixel_values.shape[0], pixel_values.shape[1], 3, self.max_img_size, self.max_img_size],
+ device=pixel_values.device,
+ )
+ start_point = self.max_img_size - self.visual_prompt_size
+ prompt[:, :, :, start_point : self.max_img_size, :] = self.pad_down
+ pixel_values += prompt.to(pixel_values.dtype)
+ return pixel_values
+
+
+class TvpFramePadPrompter(nn.Module):
+ """
+ Pad frames extracted from videos on all four sides.
+ """
+
+ def __init__(self, config):
+ if config.visual_prompter_apply not in ("add", "replace", "remove"):
+ raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)")
+
+ super().__init__()
+ self.num_frames = config.num_frames
+ self.max_img_size = config.max_img_size
+ self.visual_prompter_apply = config.visual_prompter_apply
+
+ self.base_size = config.max_img_size - config.visual_prompt_size * 2
+ self.pad_up = nn.Parameter(
+ torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size])
+ )
+ self.pad_down = nn.Parameter(
+ torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size])
+ )
+ self.pad_left = nn.Parameter(
+ torch.randn(
+ [
+ 1,
+ config.num_frames,
+ 3,
+ config.max_img_size - config.visual_prompt_size * 2,
+ config.visual_prompt_size,
+ ]
+ )
+ )
+ self.pad_right = nn.Parameter(
+ torch.randn(
+ [
+ 1,
+ config.num_frames,
+ 3,
+ config.max_img_size - config.visual_prompt_size * 2,
+ config.visual_prompt_size,
+ ]
+ )
+ )
+
+ def forward(self, pixel_values):
+ if self.visual_prompter_apply not in ("add", "remove", "replace"):
+ raise ValueError(f"Invalid visual_prompter_apply value {self.visual_prompter_apply}")
+ if self.visual_prompter_apply in ("replace", "remove"):
+ visual_prompt_mask = torch.ones(
+ [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device
+ )
+ pixel_values *= visual_prompt_mask
+ if self.visual_prompter_apply in ("replace", "add"):
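+ # Assemble a full-frame prompt from the four learnable borders around a zero-filled center,
+ # broadcast it over the batch, and add it to the (possibly masked) frames.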
+ base = torch.zeros(1, self.num_frames, 3, self.base_size, self.base_size, device=pixel_values.device)
+ prompt = torch.cat([self.pad_left, base, self.pad_right], dim=4)
+ prompt = torch.cat([self.pad_up, prompt, self.pad_down], dim=3)
+ prompt = torch.cat(pixel_values.size(0) * [prompt])
+ pixel_values = pixel_values + prompt.to(pixel_values.dtype)
+ return pixel_values
+
+
+TVP_PROMPTER_CLASSES_MAPPING = {
+ "framedownpad": TvpFrameDownPadPrompter,
+ "framepad": TvpFramePadPrompter,
+}
+
+
+@add_start_docstrings(
+ "The bare Tvp Model transformer outputting BaseModelOutputWithPooling object without any specific head on" " top.",
+ TVP_START_DOCSTRING,
+)
+class TvpModel(TvpPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+ self.vision_model = TvpVisionModel(config)
+ self.embeddings = TvpTextInputEmbeddings(config)
+ self.visual_embeddings = TvpVisualInputEmbedding(config)
+ self.encoder = TvpEncoder(config)
+ self.pooler = TvpPooler(config)
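+ # 10 learnable text prompt tokens, prepended to the text embeddings in `forward`.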
+ self.text_prompt = nn.Parameter(torch.randn([1, 10, config.hidden_size]))
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ if config.visual_prompter_type not in TVP_PROMPTER_CLASSES_MAPPING:
+ raise ValueError("`visual_prompter_type` must be in (framedownpad, framepad)")
+ self.visual_prompter = TVP_PROMPTER_CLASSES_MAPPING[config.visual_prompter_type](config)
+
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """Prunes heads of the model.
+ heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=TvpConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ r"""
+ Returns:
+
+ Examples:
+ ```python
+ >>> import torch
+ >>> from transformers import AutoConfig, AutoTokenizer, TvpModel
+
+ >>> model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp")
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")
+
+ >>> pixel_values = torch.rand(1, 1, 3, 448, 448)
+ >>> text_inputs = tokenizer("This is an example input", return_tensors="pt")
+ >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # Add visual prompt, it compensates for the spatiotemporal information loss in 2D visual features.
+ pixel_values = self.vision_model(self.visual_prompter(pixel_values))
+ # (batch_size, sequence_length, hidden_size)
+ text_embedding_output = self.embeddings(input_ids=input_ids)
+ # (batch_size, visual_sequence_length, hidden_size)
+ visual_embedding_output = self.visual_embeddings(pixel_values)
+ if attention_mask is not None:
+ # (batch_size, visual_sequence_length)
+ visual_attention_mask = attention_mask.new_ones(visual_embedding_output.shape[:2])
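+ # The first 10 positions of the sequence are the learnable text prompt tokens, which are always attended to.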
+ pt_mask = torch.ones(attention_mask.shape[0], 10).to(
+ device=attention_mask.device, dtype=attention_mask.dtype
+ )
+ attention_mask = torch.cat([pt_mask, attention_mask, visual_attention_mask], dim=-1)
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.size()).to(input_ids.device)
+ text_prompt = self.text_prompt.expand(text_embedding_output.shape[0], -1, -1)
+ # (batch_size, sequence_length + visual_sequence_length, hidden_size)
+ embedding_output = torch.cat([text_prompt, text_embedding_output, visual_embedding_output], dim=1)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=attention_mask,
+ head_mask=self.get_head_mask(head_mask, self.config.num_hidden_layers),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ last_hidden_state = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0]
+ pooled_output = self.pooler(last_hidden_state)
+ last_hidden_state = self.dropout(last_hidden_state)
+ pooled_output = self.dropout(pooled_output)
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class TvpVideoGroundingHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.layer_0 = nn.Linear(config.hidden_size, config.hidden_size * 2)
+ self.layer_1 = nn.Linear(config.hidden_size * 2, 2)
+ self.activation_0 = nn.ReLU()
+ self.activation_1 = nn.Sigmoid()
+
+ def forward(self, pooler_output):
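+ # The sigmoid keeps both outputs in [0, 1]; they are interpreted as start/duration and end/duration.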
+ logits = self.activation_0(self.layer_0(pooler_output))
+ logits = self.activation_1(self.layer_1(logits))
+ return logits
+
+
+@add_start_docstrings(
+ """
+ Tvp Model with a video grounding head on top computing IoU, distance, and duration loss.
+ """,
+ TVP_START_DOCSTRING,
+)
+class TvpForVideoGrounding(TvpPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+ self.model = TvpModel(config)
+ self.video_grounding_head = TvpVideoGroundingHead(config)
+
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TvpVideoGroundingOutput, config_class=TvpConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ labels: Tuple[torch.Tensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ r"""
+ labels (`torch.FloatTensor` of shape `(batch_size, 3)`, *optional*):
+ The labels contain the duration, start time, and end time of the video segment corresponding to the text.
+ Returns:
+
+ Examples:
+ ```python
+ >>> import torch
+ >>> from transformers import AutoConfig, AutoTokenizer, TvpForVideoGrounding
+
+ >>> model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp")
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp")
+
+ >>> pixel_values = torch.rand(1, 1, 3, 448, 448)
+ >>> text_inputs = tokenizer("This is an example input", return_tensors="pt")
+ >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+ outputs = self.model(
+ input_ids,
+ pixel_values,
+ attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ pooler_output = outputs[1]
+
+ logits = self.video_grounding_head(pooler_output)
+
+ loss = None
+ if labels is not None:
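+ # Total loss: IoU loss plus the distance and duration losses, weighted by the config.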
+ criterion = TvpLoss(["iou", "distance", "duration"])
+ criterion.to(self.device)
+ loss_dict = criterion(logits, labels)
+ loss = (
+ loss_dict["iou"]
+ + self.config.distance_loss_weight * loss_dict["distance"]
+ + self.config.duration_loss_weight * loss_dict["duration"]
+ )
+
+ if not return_dict:
+ outputs = (logits,) + outputs[2:]
+ if loss is not None:
+ outputs = (loss,) + outputs
+ return outputs
+
+ return TvpVideoGroundingOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/processing_tvp.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/processing_tvp.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e27399ab8053fdd62c42edacc2c2d1dd8586920
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/tvp/processing_tvp.py
@@ -0,0 +1,154 @@
+# coding=utf-8
+# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for TVP.
+"""
+
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding
+
+
+class TvpProcessor(ProcessorMixin):
+ r"""
+ Constructs a TVP processor which wraps a TVP image processor and a Bert tokenizer into a single processor.
+
+ [`TvpProcessor`] offers all the functionalities of [`TvpImageProcessor`] and [`BertTokenizerFast`]. See the
+ [`~TvpProcessor.__call__`] and [`~TvpProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`TvpImageProcessor`], *optional*):
+ The image processor is a required input.
+ tokenizer ([`BertTokenizerFast`], *optional*):
+ The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "TvpImageProcessor"
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
+
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+
+ def __call__(self, text=None, videos=None, return_tensors=None, **kwargs):
+ """
+ Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+ and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
+ the text. To prepare the image(s), this method forwards the `videos` and `kwargs` arguments to
+ TvpImageProcessor's [`~TvpImageProcessor.__call__`] if `videos` is not `None`. Please refer to the docstring of
+ the above two methods for more information.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ videos (`List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, `List[List[PIL.Image.Image]]`, `List[List[np.ndarray]]`,
+ `List[List[torch.Tensor]]`): The video or batch of videos to be prepared. Each video should be a list
+ of frames, which can be either PIL images or NumPy arrays. In case of NumPy arrays/PyTorch tensors,
+ each frame should be of shape (H, W, C), where H and W are frame height and width, and C is the number
+ of channels.
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors of a particular framework. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return NumPy `np.ndarray` objects.
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+ Returns:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+ `None`).
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `videos` is not `None`.
+ """
+
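+ # `max_text_length` is pulled out of `kwargs` and forwarded to the tokenizer as `max_length`.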
+ max_text_length = kwargs.pop("max_text_length", None)
+
+ if text is None and videos is None:
+ raise ValueError("You have to specify either text or videos. Both cannot be none.")
+
+ encoding = {}
+ if text is not None:
+ textual_input = self.tokenizer.batch_encode_plus(
+ text,
+ truncation=True,
+ padding="max_length",
+ max_length=max_text_length,
+ pad_to_max_length=True,
+ return_tensors=return_tensors,
+ return_token_type_ids=False,
+ **kwargs,
+ )
+ encoding.update(textual_input)
+
+ if videos is not None:
+ image_features = self.image_processor(videos, return_tensors=return_tensors, **kwargs)
+ encoding.update(image_features)
+
+ return BatchEncoding(data=encoding, tensor_type=return_tensors)
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ def post_process_video_grounding(self, logits, video_durations):
+ """
+ Compute the start and end times of the video segment predicted by the model.
+
+ Args:
+ logits (`torch.Tensor`):
+ The logits output of TvpForVideoGrounding.
+ video_durations (`float`):
+ The video's duration.
+
+ Returns:
+ start (`float`):
+ The start time of the video.
+ end (`float`):
+ The end time of the video.
+ """
+ start, end = (
+ round(logits.tolist()[0][0] * video_durations, 1),
+ round(logits.tolist()[0][1] * video_durations, 1),
+ )
+
+ return start, end
+
+ @property
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))